vgritsenko 2003/12/18 07:05:20
Modified: java/src/org/apache/xindice/core/filer BTree.java
BTreeFiler.java HashFiler.java Paged.java
Log:
Multiple changes:
* WeakHashMaps: Store WeakReferences to objects, not the objects themselves;
review synchronization.
* Mark some member variables as final, review synchronization, review access
level (public -> private)
* Paged: Change type of Paged.Page.pageNum to Long to hold page's key object
* Paged: descriptors synchronization reviewed
* BTree: Remove unused BTree.BTreeNode.root
* BTreeFiler: Corrected BTreeNode size calculation, which caused late split()s
* HashFiler: synchronize access on the head page of the collision chain
* Paged: close() will try to close all file descriptors, not only those in the cache
* Paged: dirty pages & flush() code reviewed
There are still some synchronization issues left, but the concurrent write
test (when executed alone) works well now.
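
For reference, the WeakHashMap change in the first log item boils down to the
following pattern -- a minimal sketch with illustrative names, not the
committed code. Storing a node directly as a map value keeps a strong
reference alive and can prevent the weakly-referenced key from ever being
collected; wrapping values in WeakReference avoids that:

    import java.lang.ref.WeakReference;
    import java.util.Map;
    import java.util.WeakHashMap;

    final class NodeCache {
        // Keys are page numbers (Long), values are WeakReferences to nodes.
        // Access is synchronized on the map itself, as in the commit.
        private final Map cache = new WeakHashMap();

        Object get(Long pageNum) {
            synchronized (cache) {
                WeakReference ref = (WeakReference) cache.get(pageNum);
                return (ref == null) ? null : ref.get(); // null if collected
            }
        }

        void put(Long pageNum, Object node) {
            synchronized (cache) {
                cache.put(pageNum, new WeakReference(node));
            }
        }
    }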
Revision Changes Path
1.21 +146 -143 xml-xindice/java/src/org/apache/xindice/core/filer/BTree.java
Index: BTree.java
===================================================================
RCS file: /home/cvs/xml-xindice/java/src/org/apache/xindice/core/filer/BTree.java,v
retrieving revision 1.20
retrieving revision 1.21
diff -u -r1.20 -r1.21
--- BTree.java 15 Aug 2003 03:08:36 -0000 1.20
+++ BTree.java 18 Dec 2003 15:05:20 -0000 1.21
@@ -72,6 +72,7 @@
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
+import java.lang.ref.WeakReference;
import java.util.Arrays;
import java.util.Map;
import java.util.WeakHashMap;
@@ -82,7 +83,8 @@
* map-based indexing. HashFiler uses the BTree as a set for
* producing RecordSet entries. The Indexers use BTree as a map for
* indexing entity and attribute values in Documents.
- * <br><br>
+ *
+ * <br>
* For those who don't know how a Simple-Prefix B+Tree works, the primary
* distinction is that instead of promoting actual keys to branch pages,
* when leaves are split, a shortest-possible separator is generated at
@@ -93,10 +95,12 @@
* deletions occur. Deletions only affect leaf pages in this
* implementation, and so it is entirely possible for a leaf page to be
* completely empty after all of its keys have been removed.
- * <br><br>
+ *
+ * <br>
* Also, the Variable Magnitude attribute means that the btree attempts
* to store as many values and pointers on one page as is possible.
- * <br><br>
+ *
+ * <br>
* This implementation supports the notion of nested roots. This means
* that you can create a btree where the pointers actually point to the
* root of a separate btree being managed in the same file.
@@ -111,7 +115,13 @@
protected static final byte BRANCH = 2;
protected static final byte STREAM = 3;
- private Map cache = new WeakHashMap();
+ /**
+ * Cache of the recently used tree nodes.
+ *
+ * Cache contains weak references to the BTreeNode objects, keys are page numbers (Long objects).
+ * Access synchronized by this map itself.
+ */
+ private final Map cache = new WeakHashMap();
private BTreeFileHeader fileHeader;
private BTreeRootInfo rootInfo;
@@ -133,7 +143,7 @@
if (super.open()) {
long p = fileHeader.getRootPage();
rootInfo = new BTreeRootInfo(p);
- rootNode = getBTreeNode(rootInfo, p, null);
+ rootNode = getBTreeNode(p, null);
return true;
} else {
return false;
@@ -143,19 +153,23 @@
public boolean create() throws DBException {
if (super.create()) {
try {
- open();
+ // Don't call this.open() as it will try to read rootNode from the disk
+ super.open();
long p = fileHeader.getRootPage();
rootInfo = new BTreeRootInfo(p);
- rootNode = new BTreeNode(rootInfo, getPage(p));
+
+ // Initialize root node
+ rootNode = new BTreeNode(getPage(p), null, new Value[0], new long[0]);
rootNode.ph.setStatus(LEAF);
- rootNode.setValues(new Value[0]);
- rootNode.setPointers(new long[0]);
rootNode.write();
+ synchronized (cache) {
+ cache.put(rootNode.page.getPageNum(), new WeakReference(rootNode));
+ }
close();
return true;
} catch (Exception e) {
if (log.isWarnEnabled()) {
- log.warn("ignored exception", e);
+ log.warn("Failed to create BTree, return false", e);
}
}
}
@@ -268,10 +282,10 @@
* @return The new BTreeRootInfo instance
*/
protected final BTreeRootInfo createBTreeRoot(Value v) throws IOException, BTreeException {
- BTreeNode n = createBTreeNode(rootInfo, BTree.LEAF, null);
+ BTreeNode n = createBTreeNode(BTree.LEAF, null);
n.write();
- long position = n.page.getPageNum();
+ long position = n.page.getPageNum().longValue();
addValue(v, position);
return new BTreeRootInfo(v, position);
}
@@ -285,10 +299,10 @@
* @return The new BTreeRootInfo instance
*/
protected final BTreeRootInfo createBTreeRoot(BTreeRootInfo root, Value v) throws IOException, BTreeException {
- BTreeNode n = createBTreeNode(root, BTree.LEAF, null);
+ BTreeNode n = createBTreeNode(BTree.LEAF, null);
n.write();
- long position = n.page.getPageNum();
+ long position = n.page.getPageNum().longValue();
addValue(v, position);
return new BTreeRootInfo(root, v, position);
}
@@ -323,6 +337,8 @@
* setRootNode resets the root for the specified root object to the
* provided BTreeNode's page number.
*
+ * This method is not thread safe.
+ *
* @param root The root to reset
* @param newRoot the new root node to use
*/
@@ -330,11 +346,11 @@
BTreeRootInfo parent = root.getParent();
if (parent == null) {
rootNode = newRoot;
- long p = rootNode.page.getPageNum();
+ long p = rootNode.page.getPageNum().longValue();
rootInfo.setPage(p);
fileHeader.setRootPage(p);
} else {
- long p = newRoot.page.getPageNum();
+ long p = newRoot.page.getPageNum().longValue();
root.setPage(p);
addValue(parent, root.name, p);
}
@@ -344,6 +360,8 @@
* setRootNode resets the file's root to the provided
* BTreeNode's page number.
*
+ * This method is not thread safe.
+ *
* @param rootNode the new root node to use
*/
protected final void setRootNode(BTreeNode rootNode) throws IOException, BTreeException {
@@ -361,7 +379,7 @@
if (root.page == rootInfo.page) {
return rootNode;
} else {
- return getBTreeNode(root, root.getPage(), null);
+ return getBTreeNode(root.getPage(), null);
}
}
@@ -374,41 +392,47 @@
return rootNode;
}
- private BTreeNode getBTreeNode(BTreeRootInfo root, long page, BTreeNode parent) {
+ private BTreeNode getBTreeNode(long page, BTreeNode parent) {
try {
- BTreeNode node;
- synchronized (this) {
- Long pNum = new Long(page);
- node = (BTreeNode) cache.get(pNum);
+ BTreeNode node = null;
+ synchronized (cache) {
+ WeakReference ref = (WeakReference) cache.get(new Long(page));
+ if (ref != null) {
+ node = (BTreeNode) ref.get();
+ }
+
if (node == null) {
- Page p = getPage(pNum);
- node = new BTreeNode(root, p, parent);
+ node = new BTreeNode(getPage(page), parent);
} else {
- node.root = root;
node.parent = parent;
}
+
+ cache.put(node.page.getPageNum(), new WeakReference(node));
}
- synchronized (node) {
- if (!node.isLoaded()) {
- node.read();
- node.setLoaded(true);
- }
- }
+
+ node.read();
return node;
} catch (Exception e) {
+ if (log.isWarnEnabled()) {
+ log.warn("Ignored exception", e);
+ }
return null;
}
}
- private BTreeNode createBTreeNode(BTreeRootInfo root, byte status, BTreeNode parent) {
+ private BTreeNode createBTreeNode(byte status, BTreeNode parent) {
try {
Page p = getFreePage();
- BTreeNode node = new BTreeNode(root, p, parent);
+ BTreeNode node = new BTreeNode(p, parent, new Value[0], new long[0]);
node.ph.setStatus(status);
- node.setValues(new Value[0]);
- node.setPointers(new long[0]);
+ synchronized (cache) {
+ cache.put(p.getPageNum(), new WeakReference(node));
+ }
return node;
} catch (Exception e) {
+ if (log.isWarnEnabled()) {
+ log.warn("Ignored exception", e);
+ }
return null;
}
}
@@ -416,10 +440,9 @@
/**
* BTreeRootInfo
*/
-
public final class BTreeRootInfo {
- private BTreeRootInfo parent;
- private Value name;
+ private final BTreeRootInfo parent;
+ private final Value name;
private long page;
public BTreeRootInfo(BTreeRootInfo parent, String name, long page) {
@@ -452,11 +475,11 @@
this.page = page;
}
- public synchronized BTreeRootInfo getParent() {
+ public BTreeRootInfo getParent() {
return parent;
}
- public synchronized Value getName() {
+ public Value getName() {
return name;
}
@@ -472,75 +495,66 @@
/**
* BTreeNode
*/
-
private final class BTreeNode {
- private BTreeRootInfo root;
- private Page page;
- private BTreePageHeader ph;
+ private final Page page;
+ private final BTreePageHeader ph;
private Value[] values;
private long[] ptrs;
private BTreeNode parent;
private boolean loaded;
- public BTreeNode(BTreeRootInfo root, Page page, BTreeNode parent) {
- this.root = root;
- this.page = page;
- this.parent = parent;
- ph = (BTreePageHeader) page.getPageHeader();
+ public BTreeNode(Page page) {
+ this(page, null);
}
- public BTreeNode(BTreeRootInfo root, Page page) {
- this.root = root;
+ public BTreeNode(Page page, BTreeNode parent) {
this.page = page;
- ph = (BTreePageHeader) page.getPageHeader();
- }
-
- public synchronized void setValues(Value[] values) {
- this.values = values;
- ph.setValueCount((short) values.length);
+ this.parent = parent;
+ this.ph = (BTreePageHeader) page.getPageHeader();
}
- public synchronized Value[] getValues() {
- return values;
+ public BTreeNode(Page page, BTreeNode parent, Value[] values, long[] ptrs) {
+ this(page, parent);
+ set(values, ptrs);
+ this.loaded = true;
}
- public synchronized void setPointers(long[] ptrs) {
+ /**
+ * Sets values and pointers.
+ * Internal (to the BTreeNode) method, not synchronized.
+ */
+ private void set(Value[] values, long[] ptrs) {
+ this.values = values;
+ this.ph.setValueCount((short) values.length);
this.ptrs = ptrs;
}
- public synchronized long[] getPointers() {
- return ptrs;
- }
-
- public synchronized boolean isLoaded() {
- return loaded;
- }
-
- public synchronized void setLoaded(boolean loaded) {
- this.loaded = loaded;
- }
-
+ /**
+ * Reads node only if it is not loaded yet
+ */
public synchronized void read() throws IOException {
- Value v = readValue(page);
- DataInputStream is = new DataInputStream(v.getInputStream());
+ if (!this.loaded) {
+ Value v = readValue(page);
+ DataInputStream is = new DataInputStream(v.getInputStream());
+
+ // Read in the Values
+ values = new Value[ph.getValueCount()];
+ for (int i = 0; i < values.length; i++) {
+ short valSize = is.readShort();
+ byte[] b = new byte[valSize];
- // Read in the Values
- values = new Value[ph.getValueCount()];
- for (int i = 0; i < values.length; i++) {
- short valSize = is.readShort();
- byte[] b = new byte[valSize];
+ is.read(b);
+ values[i] = new Value(b);
+ }
- is.read(b);
- values[i] = new Value(b);
- }
+ // Read in the pointers
+ ptrs = new long[ph.getPointerCount()];
+ for (int i = 0; i < ptrs.length; i++) {
+ ptrs[i] = is.readLong();
+ }
- // Read in the pointers
- ptrs = new long[ph.getPointerCount()];
- for (int i = 0; i < ptrs.length; i++) {
- ptrs[i] = is.readLong();
+ this.loaded = true;
}
-
- cache.put(new Long(page.getPageNum()), this);
}
public synchronized void write() throws IOException {
@@ -559,39 +573,29 @@
}
writeValue(page, new Value(bos.toByteArray()));
-
- cache.put(new Long(page.getPageNum()), this);
}
- public BTreeNode getChildNode(int idx) {
- boolean load;
- BTreeRootInfo loadNode;
- long loadPtr;
- synchronized (this) {
- if (ph.getStatus() == BRANCH && idx >= 0 && idx < ptrs.length) {
- load = true;
- loadNode = root;
- loadPtr = ptrs[idx];
- } else {
- load = false;
- loadNode = null;
- loadPtr = 0;
- }
- }
- if (load) {
- return getBTreeNode(loadNode, loadPtr, this);
+ /**
+ * Internal (to the BTreeNode) method.
+ * Because this method is called only by BTreeNode itself, no synchronization is done inside this method.
+ */
+ private BTreeNode getChildNode(int idx) {
+ if (ph.getStatus() == BRANCH && idx >= 0 && idx < ptrs.length) {
+ return getBTreeNode(ptrs[idx], this);
} else {
return null;
}
}
- public synchronized void getChildStream(int idx, Streamable stream) throws IOException {
+ /* Not used
+ private synchronized void getChildStream(int idx, Streamable stream) throws IOException {
if (ph.getStatus() == LEAF && idx >= 0 && idx < ptrs.length) {
Value v = readValue(ptrs[idx]);
DataInputStream dis = new DataInputStream(v.getInputStream());
stream.read(dis);
}
}
+ */
public synchronized long removeValue(Value value) throws IOException, BTreeException {
int idx = Arrays.binarySearch(values, value);
@@ -607,8 +611,7 @@
} else {
long oldPtr = ptrs[idx];
- setValues(deleteArrayValue(values, idx));
- setPointers(deleteArrayLong(ptrs, idx));
+ set(deleteArrayValue(values, idx), deleteArrayLong(ptrs, idx));
write();
return oldPtr;
@@ -638,8 +641,7 @@
long oldPtr = ptrs[idx];
ptrs[idx] = pointer;
- setValues(values);
- setPointers(ptrs);
+ set(values, ptrs);
write();
return oldPtr;
@@ -648,10 +650,9 @@
idx = -(idx + 1);
// Check to see if we've exhausted the block
- boolean split = ph.getDataLen() + 6 + value.getLength() > fileHeader.getWorkSize();
+ boolean split = ph.getDataLen() + 8 + value.getLength() + 2 > fileHeader.getWorkSize();
- setValues(insertArrayValue(values, value, idx));
- setPointers(insertArrayLong(ptrs, pointer, idx));
+ set(insertArrayValue(values, value, idx), insertArrayLong(ptrs, pointer, idx));
if (split) {
split();
@@ -666,15 +667,14 @@
}
}
- public synchronized void promoteValue(Value value, long rightPointer) throws IOException, BTreeException {
+ private synchronized void promoteValue(Value value, long rightPointer) throws IOException, BTreeException {
// Check to see if we've exhausted the block
- boolean split = ph.getDataLen() + 6 + value.getLength() > fileHeader.getWorkSize();
+ boolean split = ph.getDataLen() + 8 + value.getLength() + 2 > fileHeader.getWorkSize();
int idx = Arrays.binarySearch(values, value);
idx = idx < 0 ? -(idx + 1) : idx + 1;
- setValues(insertArrayValue(values, value, idx));
- setPointers(insertArrayLong(ptrs, rightPointer, idx + 1));
+ set(insertArrayValue(values, value, idx), insertArrayLong(ptrs, rightPointer, idx + 1));
if (split) {
split();
@@ -683,14 +683,17 @@
}
}
- public Value getSeparator(Value value1, Value value2) {
+ private Value getSeparator(Value value1, Value value2) {
int idx = value1.compareTo(value2);
byte[] b = new byte[Math.abs(idx)];
System.arraycopy(value2.getData(), 0, b, 0, b.length);
return new Value(b);
}
- public synchronized void split() throws IOException, BTreeException {
+ /**
+ * Internal to the BTreeNode method
+ */
+ private void split() throws IOException, BTreeException {
Value[] leftVals;
Value[] rightVals;
long[] leftPtrs;
@@ -734,36 +737,37 @@
throw new BTreeCorruptException("Invalid Page Type In
split");
}
- setValues(leftVals);
- setPointers(leftPtrs);
-
// Promote the pivot to the parent branch
if (parent == null) {
// This can only happen if this is the root
- BTreeNode np = createBTreeNode(root, BRANCH, null);
-
- BTreeNode rNode = createBTreeNode(root, ph.getStatus(), np);
- rNode.setValues(rightVals);
- rNode.setPointers(rightPtrs);
-
- np.setValues(new Value[]{separator});
- np.setPointers(new long[]{page.getPageNum(), rNode.page.getPageNum()});
+ BTreeNode rNode = createBTreeNode(ph.getStatus(), this);
+ rNode.set(rightVals, rightPtrs);
- parent = np;
+ BTreeNode lNode = createBTreeNode(ph.getStatus(), this);
+ lNode.set(leftVals, leftPtrs);
- setRootNode(root, np);
+ ph.setStatus(BRANCH);
+ set(new Value[] {
+ separator
+ },
+ new long[] {
+ lNode.page.getPageNum().longValue(),
+ rNode.page.getPageNum().longValue()
+ });
write();
rNode.write();
- np.write();
+ lNode.write();
} else {
- BTreeNode rNode = createBTreeNode(root, ph.getStatus(), parent);
- rNode.setValues(rightVals);
- rNode.setPointers(rightPtrs);
+ set(leftVals, leftPtrs);
+
+ BTreeNode rNode = createBTreeNode(ph.getStatus(), parent);
+ rNode.set(rightVals, rightPtrs);
write();
rNode.write();
- parent.promoteValue(separator, rNode.page.getPageNum());
+ parent.promoteValue(separator,
+ rNode.page.getPageNum().longValue());
}
}
@@ -926,7 +930,7 @@
if (rightIdx < 0) {
rightIdx = -(rightIdx + 1);
}
-
+
for (int i = 0; i < ptrs.length; i++) {
if (i >= rightIdx && query.testValue(values[i])) {
callback.indexInfo(values[i], ptrs[i]);
@@ -1053,7 +1057,6 @@
public synchronized void read(DataInputStream dis) throws IOException {
super.read(dis);
-
if (getStatus() == UNUSED) {
return;
}
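
The "late split()s" fix above comes down to per-entry arithmetic: each entry
written by BTreeNode.write() costs a 2-byte (short) length prefix, the value
bytes, and an 8-byte (long) pointer, so the old "+ 6" overhead under-counted
by 4 bytes per insert. A hedged sketch of the corrected check, with
illustrative names:

    // True when adding a value of the given length would overflow the
    // node's work size, so the node must split first.
    static boolean needsSplit(int dataLen, int valueLen, int workSize) {
        final int SHORT_SIZE = 2; // value length prefix written with writeShort()
        final int LONG_SIZE = 8;  // page pointer written with writeLong()
        return dataLen + LONG_SIZE + valueLen + SHORT_SIZE > workSize;
    }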
1.19 +6 -8 xml-xindice/java/src/org/apache/xindice/core/filer/BTreeFiler.java
Index: BTreeFiler.java
===================================================================
RCS file: /home/cvs/xml-xindice/java/src/org/apache/xindice/core/filer/BTreeFiler.java,v
retrieving revision 1.18
retrieving revision 1.19
diff -u -r1.18 -r1.19
--- BTreeFiler.java 15 Aug 2003 03:08:36 -0000 1.18
+++ BTreeFiler.java 18 Dec 2003 15:05:20 -0000 1.19
@@ -89,6 +89,7 @@
private static final Log log = LogFactory.getLog(BTreeFiler.class);
+ // Page status
protected static final byte RECORD = 20;
private static final String PAGESIZE = "pagesize";
@@ -181,11 +182,11 @@
p = getPage(pos);
} catch (BTreeNotFoundException e) {
p = getFreePage();
- addValue(key, p.getPageNum());
+ addValue(key, p.getPageNum().longValue());
fileHeader.incRecordCount();
}
- BTreeFilerPageHeader ph = (BTreeFilerPageHeader) p.getPageHeader();
+ BTreeFilerPageHeader ph = (BTreeFilerPageHeader) p.getPageHeader();
long t = System.currentTimeMillis();
if (ph.getStatus() == UNUSED) {
ph.setCreated(t);
@@ -194,13 +195,11 @@
ph.setStatus(RECORD);
writeValue(p, value);
-
flush();
} catch (IOException e) {
throw new FilerException(FaultCodes.DBE_CANNOT_CREATE,
"Can't write record '" + key + "': " +
e.getMessage(), e);
}
-
return true;
}
@@ -215,12 +214,11 @@
Page p = getPage(pos);
removeValue(key);
- unlinkPages(p.getPageNum());
+ unlinkPages(p);
fileHeader.decRecordCount();
flush();
-
return true;
} catch (BTreeNotFoundException e) {
if (log.isDebugEnabled()) {
1.18 +80 -127 xml-xindice/java/src/org/apache/xindice/core/filer/HashFiler.java
Index: HashFiler.java
===================================================================
RCS file: /home/cvs/xml-xindice/java/src/org/apache/xindice/core/filer/HashFiler.java,v
retrieving revision 1.17
retrieving revision 1.18
diff -u -r1.17 -r1.18
--- HashFiler.java 12 Aug 2003 02:57:29 -0000 1.17
+++ HashFiler.java 18 Dec 2003 15:05:20 -0000 1.18
@@ -88,7 +88,7 @@
* data and allocate Record space.
*
* @deprecated This class has been temporarily (maybe indefinitely)
- * deprecated by BTreeFiler.
+ * deprecated by BTreeFiler.
* @version CVS $Revision$, $Date$
*/
public final class HashFiler extends Paged implements Configurable, Filer {
@@ -102,7 +102,6 @@
private static final String MAXKEYSIZE = "maxkeysize";
private Configuration config;
private HashFileHeader fileHeader;
- //private BTree btree;
public HashFiler() {
@@ -120,7 +119,6 @@
public void setLocation(File root, String location) {
setFile(new File(root, location + ".tbl"));
- // btree = new BTree(new File(root, location + ".pkx"));
}
public String getName() {
@@ -131,7 +129,6 @@
if (super.open()) {
// These are the only properties that can be changed after creation
fileHeader.setMaxKeySize(config.getShortAttribute(MAXKEYSIZE, fileHeader.getMaxKeySize()));
- // btree.open();
return true;
} else {
return false;
@@ -142,49 +139,26 @@
fileHeader.setPageCount(config.getLongAttribute(PAGECOUNT, fileHeader.getPageCount()));
fileHeader.setPageSize(config.getIntAttribute(PAGESIZE, fileHeader.getPageSize()));
fileHeader.setMaxKeySize(config.getShortAttribute(MAXKEYSIZE, fileHeader.getMaxKeySize()));
- //btree.create();
return super.create();
}
private Page seekRecordPage(Key key) throws IOException {
int hash = key.getHash();
long pageNum = hash % fileHeader.getPageCount();
- Page p = null;
- HashPageHeader ph = null;
- while (true) {
- p = getPage(pageNum);
- ph = (HashPageHeader) p.getPageHeader();
- if (ph.getStatus() == RECORD && ph.getKeyHash() == key.getHash() && p.getKey().equals(key)) {
- return p;
- }
-
- pageNum = ph.getNextCollision();
- if (pageNum == -1) {
- return null;
- }
- }
- }
-
- private Page seekInsertionPage(Key key) throws IOException {
- int hash = key.getHash();
- long pageNum = hash % fileHeader.getPageCount();
-
- log.trace("Hash page " + pageNum);
+ Page p = getPage(pageNum);
+ synchronized (p) {
+ HashPageHeader ph = null;
+ while (true) {
+ ph = (HashPageHeader) p.getPageHeader();
+ if (ph.getStatus() == RECORD && ph.getKeyHash() == key.getHash() && p.getKey().equals(key)) {
+ return p;
+ }
- Page p = null;
- HashPageHeader ph = null;
- while (true) {
- p = getPage(pageNum);
- ph = (HashPageHeader) p.getPageHeader();
- if ((ph.getStatus() == UNUSED || ph.getStatus() == DELETED) || (ph.getStatus() == RECORD && ph.getKeyHash() == key.getHash() && p.getKey().equals(key))) {
- return p;
- }
- pageNum = ph.getNextCollision();
- if (pageNum == -1) {
- Page np = getFreePage();
- ph.setNextCollision(np.getPageNum());
- p.write();
- return np;
+ pageNum = ph.getNextCollision();
+ if (pageNum == -1) {
+ return null;
+ }
+ p = getPage(pageNum);
}
}
}
@@ -223,26 +197,50 @@
}
checkOpened();
try {
- Page p = seekInsertionPage(key);
- HashPageHeader ph = (HashPageHeader) p.getPageHeader();
-
- long t = System.currentTimeMillis();
-
- // if ( ph.getStatus() == UNUSED ) {
- // This is a new Record
- fileHeader.incRecordCount();
+ int hash = key.getHash();
+ long pageNum = hash % fileHeader.getPageCount();
- // btree.addValue(key, p.getPageNum());
+ // Synchronize by chain head page
+ Page p = getPage(pageNum);
+ synchronized (p) {
+ HashPageHeader ph;
+ while (true) {
+ ph = (HashPageHeader) p.getPageHeader();
+ if (ph.getStatus() == UNUSED || ph.getStatus() == DELETED
+ || (ph.getStatus() == RECORD && ph.getKeyHash() == key.getHash() && p.getKey().equals(key))) {
+ // Found free page
+ break;
+ }
- ph.setCreated(t);
+ pageNum = ph.getNextCollision();
+ if (pageNum == -1) {
+ // Reached end of chain, add new page
+ Page np = getFreePage();
+ ph.setNextCollision(np.getPageNum().longValue());
+ p.write();
+ p = np;
+ ph = (HashPageHeader) p.getPageHeader();
+ ph.setNextCollision(NO_PAGE);
+ break;
+ }
- // }
+ // Go to next page in chain
+ p = getPage(pageNum);
+ }
- ph.setModified(t);
- ph.setStatus(RECORD);
+ // Here we have a page
+ long t = System.currentTimeMillis();
+ if (ph.getStatus() == UNUSED || ph.getStatus() == DELETED) {
+ // This is a new Record
+ fileHeader.incRecordCount();
+ ph.setCreated(t);
+ }
+ ph.setModified(t);
+ ph.setStatus(RECORD);
- p.setKey(key);
- writeValue(p, value);
+ p.setKey(key);
+ writeValue(p, value);
+ }
flush();
} catch (Exception e) {
@@ -261,40 +259,38 @@
try {
int hash = key.getHash();
long pageNum = hash % fileHeader.getPageCount();
- Page prev = null;
- Page page = null;
- HashPageHeader prevHead = null;
- HashPageHeader pageHead = null;
+ Page page = getPage(pageNum);
+ synchronized(page) {
+ HashPageHeader prevHead = null;
+ HashPageHeader pageHead = null;
+
+ Page prev = null;
+ while (true) {
+ pageHead = (HashPageHeader) page.getPageHeader();
+ if (pageHead.getStatus() == RECORD && pageHead.getKeyHash() == key.getHash() && page.getKey().equals(key)) {
+ break;
+ }
- while (true) {
- page = getPage(pageNum);
- pageHead = (HashPageHeader) page.getPageHeader();
- if (pageHead.getStatus() == RECORD && pageHead.getKeyHash() == key.getHash() && page.getKey().equals(key)) {
- break;
- }
- pageNum = pageHead.getNextCollision();
- if (pageNum == -1) {
- return false;
+ pageNum = pageHead.getNextCollision();
+ if (pageNum == NO_PAGE) {
+ return false;
+ }
+ prev = page;
+ prevHead = pageHead;
+ page = getPage(pageNum);
}
- prev = page;
- prevHead = pageHead;
- }
- if (page == null) {
- return false;
- }
+ if (prev != null) {
+ prevHead.setNextCollision(pageHead.nextCollision);
+ pageHead.setNextCollision(NO_PAGE);
+ prev.write();
+ }
- if (prev != null) {
- prevHead.setNextCollision(pageHead.nextCollision);
- prev.write();
+ unlinkPages(page);
}
- //btree.removeValue(key);
- unlinkPages(page);
-
fileHeader.decRecordCount();
-
flush();
return true;
@@ -368,47 +364,6 @@
}
}
- /**
- * HashFilerRecordSet
- */
- /* private class HashFilerRecordSet implements RecordSet, BTreeCallback {
- private List keys = new ArrayList();
- private Iterator enum;
-
- public HashFilerRecordSet() {
- try {
- btree.query(null, this);
- enum = keys.iterator();
- }
- catch ( Exception e ) {
- if (log.isDebugEnabled()) {
- log.debug("No message", e);
- }
- }
- }
-
- public synchronized boolean indexInfo(Value value, long pointer) {
- keys.add(new Key(value));
- return true;
- }
-
- public synchronized Key getNextKey() {
- return (Key)enum.next();
- }
-
- public synchronized Record getNextRecord() throws DBException {
- return readRecord((Key)enum.next());
- }
-
- public synchronized Value getNextValue() throws DBException {
- return getNextRecord().getValue();
- }
-
- public synchronized boolean hasMoreRecords() {
- return enum.hasNext();
- }
- }*/
-
////////////////////////////////////////////////////////////////////
public FileHeader createFileHeader() {
@@ -434,7 +389,6 @@
/**
* HashFileHeader
*/
-
private final class HashFileHeader extends FileHeader {
private long totalBytes = 0;
@@ -478,11 +432,10 @@
/**
* HashPageHeader
*/
-
private final class HashPageHeader extends PageHeader {
private long created = 0;
private long modified = 0;
- private long nextCollision = -1;
+ private long nextCollision = NO_PAGE;
public HashPageHeader() {
}
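
The locking scheme introduced above keys everything off the chain's head
page: a bucket's head page never changes (it is hash % pageCount), so holding
its monitor serializes all lookups, inserts, and removals on that one
collision chain while other buckets stay fully concurrent. A sketch of the
lookup, paraphrasing the new seekRecordPage() and assuming the surrounding
HashFiler/Paged members shown in the diff:

    private Page findInChain(Key key) throws IOException {
        long pageNum = key.getHash() % fileHeader.getPageCount();
        Page head = getPage(pageNum);   // fixed head page for this bucket
        synchronized (head) {           // one lock per chain, not per file
            Page p = head;
            while (true) {
                HashPageHeader ph = (HashPageHeader) p.getPageHeader();
                if (ph.getStatus() == RECORD
                        && ph.getKeyHash() == key.getHash()
                        && p.getKey().equals(key)) {
                    return p;           // found the record's page
                }
                pageNum = ph.getNextCollision();
                if (pageNum == NO_PAGE) {
                    return null;        // end of chain, key absent
                }
                p = getPage(pageNum);   // follow chain under the head's lock
            }
        }
    }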
1.22 +304 -224 xml-xindice/java/src/org/apache/xindice/core/filer/Paged.java
Index: Paged.java
===================================================================
RCS file: /home/cvs/xml-xindice/java/src/org/apache/xindice/core/filer/Paged.java,v
retrieving revision 1.21
retrieving revision 1.22
diff -u -r1.21 -r1.22
--- Paged.java 6 Dec 2003 22:55:56 -0000 1.21
+++ Paged.java 18 Dec 2003 15:05:20 -0000 1.22
@@ -81,6 +81,9 @@
import java.util.Map;
import java.util.Stack;
import java.util.WeakHashMap;
+import java.util.Collection;
+import java.util.EmptyStackException;
+import java.lang.ref.WeakReference;
/**
* Paged is a paged file foundation that is used by both the BTree
@@ -93,40 +96,67 @@
private static final Log log = LogFactory.getLog(Paged.class);
- // The maximum number of pages that will be held in the dirty cache.
+ /**
+ * The maximum number of pages that will be held in the dirty cache.
+ * Once the number reaches the limit, pages are flushed to disk.
+ */
private static final int MAX_DIRTY_SIZE = 128;
- // The maximum number of open random access files we can have
+ /**
+ * The maximum number of open random access files we can have
+ */
private static final int MAX_DESCRIPTORS = 16;
+ // Page status
protected static final byte UNUSED = 0;
protected static final byte OVERFLOW = 126;
protected static final byte DELETED = 127;
+ // Page ID of non-existent page
protected static final int NO_PAGE = -1;
- // Cache of recently read pages.
- private Map pages = new WeakHashMap();
+ /**
+ * Cache of recently read pages.
+ *
+ * Cache contains weak references to the Page objects, keys are page numbers (Long objects).
+ * Access synchronized by this map itself.
+ */
+ private final Map pages = new WeakHashMap();
- // Cache of modified pages waiting to be written out.
+ /**
+ * Cache of modified pages waiting to be written out.
+ * Access synchronized by the {@link #dirtyLock}.
+ */
private Map dirty = new HashMap();
+ private final Object dirtyLock = new Object();
- private Object dirtyLock = new Object();
-
- // Random access file cache.
- private Stack descriptors = new Stack();
+ /**
+ * Random access file descriptors cache.
+ * Access to it and to {@link #descCount} is synchronized by itself.
+ */
+ private final Stack descriptors = new Stack();
- // The number of random access file objects that exist. Either in the cache
- // or in use.
+ /**
+ * The number of random access file objects that exist, either in the
+ * cache {@link #descriptors}, or currently in use.
+ */
private int descCount = 0;
- // Whether the file is opened or not.
+ /**
+ * Whether the file is opened or not.
+ */
private boolean opened = false;
- // The underlying file where the Paged object stores its pages.
+ /**
+ * The underlying file where the Paged object stores its pages.
+ */
private File file;
- private FileHeader fileHeader;
+ /**
+ * Header of this Paged
+ */
+ private final FileHeader fileHeader;
+
public Paged() {
fileHeader = createFileHeader();
@@ -155,27 +185,34 @@
return file;
}
- protected synchronized final RandomAccessFile getDescriptor() throws IOException {
- // If there are descriptors in the cache return one.
- if (!descriptors.empty()) {
- return (RandomAccessFile) descriptors.pop();
- }
- // Otherwise we need to get one some other way.
- else {
- // First try to create a new one if there's room
- if (descCount < MAX_DESCRIPTORS) {
- descCount++;
- return new RandomAccessFile(file, "rw");
+ /**
+ * Obtain RandomAccessFile ('descriptor') object out of the pool.
+ * If no descriptors available, and maximum amount already allocated,
+ * the call will block.
+ */
+ protected final RandomAccessFile getDescriptor() throws IOException {
+ synchronized (descriptors) {
+ // If there are descriptors in the cache return one.
+ if (!descriptors.empty()) {
+ return (RandomAccessFile) descriptors.pop();
}
- // Otherwise we have to wait for one to be released by another thread.
+ // Otherwise we need to get one some other way.
else {
- while (true) {
- try {
- wait();
- return (RandomAccessFile) descriptors.pop();
- } catch (Exception e) {
- if (log.isWarnEnabled()) {
- log.warn("ignored exception", e);
+ // First try to create a new one if there's room
+ if (descCount < MAX_DESCRIPTORS) {
+ descCount++;
+ return new RandomAccessFile(file, "rw");
+ }
+ // Otherwise we have to wait for one to be released by another thread.
+ else {
+ while (true) {
+ try {
+ descriptors.wait();
+ return (RandomAccessFile) descriptors.pop();
+ } catch (InterruptedException e) {
+ // Ignore, and continue to wait
+ } catch (EmptyStackException e) {
+ // Ignore, and continue to wait
}
}
}
@@ -184,23 +221,44 @@
}
/**
- * Puts a RandomAccessFile/descriptor into the descriptor cache.
+ * Puts a RandomAccessFile ('descriptor') back into the descriptor pool.
*/
- protected synchronized final void putDescriptor(RandomAccessFile raf) {
+ protected final void putDescriptor(RandomAccessFile raf) {
if (raf != null) {
- descriptors.push(raf);
- notify();
+ synchronized (descriptors) {
+ descriptors.push(raf);
+ descriptors.notify();
+ }
+ }
+ }
+
+ /**
+ * Closes a RandomAccessFile ('descriptor') and removes it from the pool.
+ */
+ protected final void closeDescriptor(RandomAccessFile raf) {
+ if (raf != null) {
+ try {
+ raf.close();
+ } catch (IOException e) {
+ // Ignore close exception
+ }
+
+ // Synchronization is necessary as decrement operation is not atomic
+ synchronized (descriptors) {
+ descCount --;
+ }
}
}
/**
* getPage returns the page specified by pageNum.
*
- * @param lp The Page number
+ * @param pageNum The Page number
* @return The requested Page
* @throws IOException if an Exception occurs
*/
- protected final Page getPage(Long lp) throws IOException {
+ protected final Page getPage(long pageNum) throws IOException {
+ final Long lp = new Long(pageNum);
Page p;
synchronized (this) {
// Check if it's in the dirty cache
@@ -209,39 +267,25 @@
// if not check if it's already loaded in the page cache
if (p == null) {
- p = (Page) pages.get(lp);
+ WeakReference ref = (WeakReference)pages.get(lp);
+ if (ref != null) {
+ p = (Page) ref.get();
+ }
}
// if still not found we need to create it and add it to the page cache.
if (p == null) {
- p = new Page(lp.longValue());
- pages.put(lp, p);
+ p = new Page(lp);
+ pages.put(p.pageNum, new WeakReference(p));
}
}
// Load the page from disk if necessary
- synchronized (p) {
- if (!p.isLoaded()) {
- p.read();
- p.setLoaded(true);
- }
- }
-
+ p.read();
return p;
}
/**
- * getPage returns the page specified by pageNum.
- *
- * @param pageNum The Page number
- * @return The requested Page
- * @throws IOException if an Exception occurs
- */
- protected final Page getPage(long pageNum) throws IOException {
- return getPage(new Long(pageNum));
- }
-
- /**
* readValue reads the multi-Paged Value starting at the specified
* Page.
*
@@ -250,26 +294,23 @@
* @throws IOException if an Exception occurs
*/
protected final Value readValue(Page page) throws IOException {
- PageHeader sph = page.getPageHeader();
+ final PageHeader sph = page.getPageHeader();
ByteArrayOutputStream bos = new ByteArrayOutputStream(sph.getRecordLen());
- Page p = page;
- PageHeader ph = null;
- long nextPage;
// Loop until we've read all the pages into memory.
+ Page p = page;
while (true) {
- ph = p.getPageHeader();
+ PageHeader ph = p.getPageHeader();
// Add the contents of the page onto the stream
p.streamTo(bos);
// Continue following the list of pages until we get to the end.
- nextPage = ph.getNextPage();
- if (nextPage != NO_PAGE) {
- p = getPage(nextPage);
- } else {
+ long nextPage = ph.getNextPage();
+ if (nextPage == NO_PAGE) {
break;
}
+ p = getPage(nextPage);
}
// Return a Value with the collected contents of all pages.
@@ -321,7 +362,7 @@
} else {
// Create a new overflow page
page = getFreePage();
- lhdr.setNextPage(page.getPageNum());
+ lhdr.setNextPage(page.getPageNum().longValue());
}
// Mark the page as an overflow page.
@@ -330,7 +371,6 @@
// Write some more of the value to the overflow page.
page.streamFrom(is);
-
lpage.write();
}
@@ -366,7 +406,8 @@
protected final void unlinkPages(Page page) throws IOException {
// Handle the page if it's in primary space by setting its status to
// DELETED and freeing any overflow pages linked to it.
- if (page.pageNum < fileHeader.pageCount) {
+ // FIXME: Revisit concurrency/synchronization issues
+ if (page.pageNum.longValue() < fileHeader.pageCount) {
long nextPage = page.header.nextPage;
page.header.setStatus(DELETED);
page.header.setNextPage(NO_PAGE);
@@ -383,13 +424,13 @@
// Add any overflow pages to the list of free pages.
if (page != null) {
// Get the first page in the chain.
- long firstPage = page.pageNum;
+ long firstPage = page.pageNum.longValue();
// Find the last page in the chain.
while (page.header.nextPage != NO_PAGE) {
page = getPage(page.header.nextPage);
}
- long lastPage = page.pageNum;
+ long lastPage = page.pageNum.longValue();
// If there are already some free pages, add the start of the chain
// to the list of free pages.
@@ -432,6 +473,7 @@
long pageNum = fileHeader.firstFreePage;
if (pageNum != NO_PAGE) {
// Steal a deleted page
+ // FIXME: Sync issue here
p = getPage(pageNum);
fileHeader.setFirstFreePage(p.getPageHeader().nextPage);
if (fileHeader.firstFreePage == NO_PAGE) {
@@ -439,8 +481,7 @@
}
} else {
// Grow the file
- pageNum = fileHeader.totalCount;
- fileHeader.setTotalCount(pageNum + 1);
+ pageNum = fileHeader.incTotalCount();
p = getPage(pageNum);
}
@@ -476,17 +517,16 @@
raf = getDescriptor();
fileHeader.write();
flush();
- raf.close();
return true;
} catch (Exception e) {
- throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR, "Error
creating " + file.getName(), e);
+ throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR,
+ "Error creating " + file.getName(), e);
} finally {
- //putDescriptor(raf);
- descCount -= 1;
+ closeDescriptor(raf);
}
}
- public boolean open() throws DBException {
+ public synchronized boolean open() throws DBException {
RandomAccessFile raf = null;
try {
if (exists()) {
@@ -506,28 +546,40 @@
}
public synchronized boolean close() throws DBException {
- try {
- if (isOpened()) {
- flush();
+ if (isOpened()) {
+ try {
+ // First of all, mark as closed to prevent operations
opened = false;
- while (!descriptors.empty()) {
- try {
- RandomAccessFile raf = getDescriptor();
- raf.close();
- descCount -= 1;
- } catch (Exception e) {
- // TODO Hmmmm....
- if (log.isWarnEnabled()) {
- log.warn("ignored exception", e);
+ flush();
+
+ synchronized (descriptors) {
+ final int total = descCount;
+ // Close descriptors in cache
+ while (!descriptors.empty()) {
+ closeDescriptor((RandomAccessFile)descriptors.pop());
+ }
+ // Attempt to close descriptors in use. Max wait time = 0.5s * MAX_DESCRIPTORS
+ int n = descCount;
+ while (descCount > 0 && n > 0) {
+ descriptors.wait(500);
+ if (descriptors.isEmpty()) {
+ n--;
+ } else {
+ closeDescriptor((RandomAccessFile)descriptors.pop());
}
}
+ if (descCount > 0) {
+ log.warn(descCount + " out of " + total + " files
were not closed during close.");
+ }
}
- return true;
- } else
- return false;
- } catch (Exception e) {
- throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR, "Error
closing " + file.getName(), e);
+ } catch (Exception e) {
+ // Failed to close, leave open
+ opened = true;
+ throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR,
+ "Error closing " + file.getName(),
e);
+ }
}
+ return true;
}
public boolean isOpened() {
@@ -548,38 +600,56 @@
}
}
- public void flush() throws DBException {
- // TODO: Clean up this code
- boolean error = false;
- synchronized (this) {
- Map dirtyToFlush;
- synchronized (dirtyLock) {
- dirtyToFlush = dirty;
- dirty = new HashMap();
- }
- Iterator i = dirtyToFlush.values().iterator();
-
- while (i.hasNext()) {
- Page p = (Page) i.next();
+ void addDirty(Page page) throws IOException {
+ synchronized (dirtyLock) {
+ dirty.put(page.pageNum, page);
+ if (dirty.size() > MAX_DIRTY_SIZE) {
try {
- p.flush();
+ // Too many dirty pages... flush them
+ flush();
} catch (Exception e) {
- error = true;
+ throw new IOException(e.getMessage());
}
}
- dirtyToFlush.clear();
+ }
+ }
- if (fileHeader.dirty) {
- try {
- fileHeader.write();
- } catch (Exception e) {
- error = true;
- }
+ public void flush() throws DBException {
+ // This method is not synchronized
+
+ // Error flag/counter
+ int error = 0;
+
+ // Obtain collection of dirty pages
+ Collection pages;
+ synchronized (dirtyLock) {
+ pages = dirty.values();
+ dirty = new HashMap();
+ }
+
+ // Flush dirty pages
+ Iterator i = pages.iterator();
+ while (i.hasNext()) {
+ Page p = (Page) i.next();
+ try {
+ p.flush();
+ } catch (Exception e) {
+ error++;
+ }
+ }
+
+ // Flush header
+ if (fileHeader.dirty) {
+ try {
+ fileHeader.write();
+ } catch (Exception e) {
+ error++;
}
}
- if (error) {
- throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR, "Error
performing flush!");
+ if (error != 0) {
+ throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR,
+ "Error performing flush! Failed to
flush " + error + " pages!");
}
}
@@ -710,7 +780,7 @@
if (idx < vals.length) {
System.arraycopy(vals, idx, newVals, idx + 1, vals.length - idx);
}
-
+
return newVals;
}
@@ -722,7 +792,7 @@
if (idx < newVals.length) {
System.arraycopy(vals, idx + 1, newVals, idx, newVals.length - idx);
}
-
+
return newVals;
}
@@ -730,7 +800,6 @@
/**
* FileHeader
*/
-
public abstract class FileHeader {
private boolean dirty = false;
private int workSize;
@@ -756,8 +825,8 @@
public FileHeader(long pageCount, int pageSize) {
this.pageSize = pageSize;
this.pageCount = pageCount;
- totalCount = pageCount;
- headerSize = (short) 4096;
+ this.totalCount = pageCount;
+ this.headerSize = (short) 4096;
calculateWorkSize();
}
@@ -827,7 +896,10 @@
return dirty;
}
- /** The size of the FileHeader. Usually 1 OS Page */
+ /**
+ * The size of the FileHeader. Usually 1 OS Page.
+ * This method should be called only while initializing Paged, not during normal processing.
+ */
public synchronized final void setHeaderSize(short headerSize) {
this.headerSize = headerSize;
dirty = true;
@@ -838,7 +910,10 @@
return headerSize;
}
- /** The size of a page. Usually a multiple of a FS block */
+ /**
+ * The size of a page. Usually a multiple of a FS block.
+ * This method should be called only while initializing Paged, not during normal processing.
+ */
public synchronized final void setPageSize(int pageSize) {
this.pageSize = pageSize;
calculateWorkSize();
@@ -850,7 +925,10 @@
return pageSize;
}
- /** The number of pages in primary storage */
+ /**
+ * The number of pages in primary storage.
+ * This method should be called only while initializing Paged, not during normal processing.
+ */
public synchronized final void setPageCount(long pageCount) {
this.pageCount = pageCount;
dirty = true;
@@ -861,12 +939,20 @@
return pageCount;
}
- /** The number of total pages in the file */
+ /**
+ * The number of total pages in the file.
+ * This method should be called only while initializing Paged, not during normal processing.
+ */
public synchronized final void setTotalCount(long totalCount) {
this.totalCount = totalCount;
dirty = true;
}
+ public synchronized final long incTotalCount() {
+ dirty = true;
+ return this.totalCount++;
+ }
+
/** The number of total pages in the file */
public synchronized final long getTotalCount() {
return totalCount;
@@ -933,12 +1019,6 @@
return maxKeySize;
}
- /** The number of records being managed by the file (not pages) */
- public synchronized final void setRecordCount(long recordCount) {
- this.recordCount = recordCount;
- dirty = true;
- }
-
/** Increment the number of records being managed by the file */
public synchronized final void incRecordCount() {
recordCount++;
@@ -969,7 +1049,7 @@
* PageHeader
*/
- public abstract class PageHeader implements Streamable {
+ public abstract static class PageHeader implements Streamable {
private boolean dirty = false;
private byte status = UNUSED;
private short keyLen = 0;
@@ -988,7 +1068,6 @@
public synchronized void read(DataInputStream dis) throws IOException {
status = dis.readByte();
dirty = false;
-
if (status == UNUSED) {
return;
}
@@ -1097,121 +1176,130 @@
/**
* Page
*/
-
public final class Page implements Comparable {
- /** This page number */
- private long pageNum;
+ /**
+ * This page number
+ */
+ private final Long pageNum;
- /** The data for this page */
- private byte[] data;
+ /**
+ * The Header for this Page
+ */
+ private final PageHeader header;
+
+ /**
+ * The offset into the file that this page starts
+ */
+ private final long offset;
- /** The Header for this Page */
- private PageHeader header = createPageHeader();
+ /**
+ * The data for this page. Null if page is not loaded.
+ */
+ private byte[] data;
- /** The position (relative) of the Key in the data array */
+ /**
+ * The position (relative) of the Key in the data array
+ */
private int keyPos;
- /** The position (relative) of the Data in the data array */
+ /**
+ * The position (relative) of the Data in the data array
+ */
private int dataPos;
- /** The offset into the file that this page starts */
- private long offset;
-
- private boolean loaded;
-
- public Page() {
- }
- public Page(long pageNum) throws IOException {
- this();
- setPageNum(pageNum);
+ public Page(Long pageNum) throws IOException {
+ this.header = createPageHeader();
+ this.pageNum = pageNum;
+ this.offset = fileHeader.headerSize + (pageNum.longValue() * fileHeader.pageSize);
}
+ /**
+ * Reads a page into the memory, once. Subsequent calls are ignored.
+ */
public synchronized void read() throws IOException {
- RandomAccessFile raf = null;
- try {
- data = new byte[fileHeader.pageSize];
-
- raf = getDescriptor();
- raf.seek(offset);
- raf.read(data);
-
- ByteArrayInputStream bis = new ByteArrayInputStream(data);
- DataInputStream dis = new DataInputStream(bis);
-
- // Read in the header
- header.read(dis);
-
- keyPos = fileHeader.pageHeaderSize;
- dataPos = keyPos + header.keyLen;
- } finally {
- putDescriptor(raf);
+ if (data == null) {
+ RandomAccessFile raf = null;
+ try {
+ byte[] data = new byte[fileHeader.pageSize];
+ raf = getDescriptor();
+ raf.seek(this.offset);
+ raf.read(data);
+
+ // Read in the header
+ ByteArrayInputStream bis = new ByteArrayInputStream(data);
+ this.header.read(new DataInputStream(bis));
+
+ this.keyPos = fileHeader.pageHeaderSize;
+ this.dataPos = this.keyPos + this.header.keyLen;
+
+ // Successfully read all the data
+ this.data = data;
+ } finally {
+ putDescriptor(raf);
+ }
}
}
- public synchronized void write() throws IOException {
- // Write out the header
- ByteArrayOutputStream bos = new ByteArrayOutputStream(fileHeader.getPageHeaderSize());
- DataOutputStream dos = new DataOutputStream(bos);
- header.write(dos);
- byte[] b = bos.toByteArray();
- System.arraycopy(b, 0, data, 0, b.length);
-
- synchronized (dirtyLock) {
- dirty.put(new Long(pageNum), this);
- if (dirty.size() > MAX_DIRTY_SIZE) {
- try {
- // Too many dirty pages... flush them
- Paged.this.flush();
- } catch (Exception e) {
- throw new IOException(e.getMessage());
- }
- }
+ /**
+ * Writes out the header into this.data, and adds the page to the set of
+ * dirty pages.
+ */
+ public void write() throws IOException {
+ // Write out the header into this.data
+ synchronized (this) {
+ ByteArrayOutputStream bos = new ByteArrayOutputStream(fileHeader.getPageHeaderSize());
+ header.write(new DataOutputStream(bos));
+ byte[] b = bos.toByteArray();
+ System.arraycopy(b, 0, data, 0, b.length);
}
+
+ // Add to the list of dirty pages
+ Paged.this.addDirty(this);
}
+ /**
+ * Flushes content of the dirty page into the file
+ */
public synchronized void flush() throws IOException {
RandomAccessFile raf = null;
try {
raf = getDescriptor();
- if (offset >= raf.length()) {
+ if (this.offset >= raf.length()) {
// Grow the file
long o = (fileHeader.headerSize + ((fileHeader.totalCount * 3) / 2) * fileHeader.pageSize) + (fileHeader.pageSize - 1);
raf.seek(o);
raf.writeByte(0);
}
- raf.seek(offset);
- raf.write(data);
+ raf.seek(this.offset);
+ raf.write(this.data);
} finally {
putDescriptor(raf);
}
}
- public synchronized void setPageNum(long pageNum) {
- this.pageNum = pageNum;
- offset = fileHeader.headerSize + (pageNum * fileHeader.pageSize);
+ // No synchronization - pageNum is final
+ public Long getPageNum() {
+ return this.pageNum;
}
- public synchronized long getPageNum() {
- return pageNum;
- }
-
- public synchronized PageHeader getPageHeader() {
- return header;
+ // No synchronization - header is final
+ public PageHeader getPageHeader() {
+ return this.header;
}
public synchronized void setKey(Key key) {
header.setKey(key);
// Insert the key into the data array.
- key.copyTo(data, keyPos);
+ key.copyTo(this.data, this.keyPos);
// Set the start of data to skip over the key.
- dataPos = keyPos + header.keyLen;
+ this.dataPos = this.keyPos + header.keyLen;
}
public synchronized Key getKey() {
if (header.keyLen > 0) {
- return new Key(data, keyPos, header.keyLen);
+ return new Key(this.data, this.keyPos, header.keyLen);
} else {
return null;
}
@@ -1219,7 +1307,7 @@
public synchronized void streamTo(OutputStream os) throws IOException {
if (header.dataLen > 0) {
- os.write(data, dataPos, header.dataLen);
+ os.write(this.data, this.dataPos, header.dataLen);
}
}
@@ -1230,21 +1318,13 @@
header.dataLen = avail;
}
if (header.dataLen > 0) {
- is.read(data, keyPos + header.keyLen, header.dataLen);
+ is.read(this.data, this.keyPos + header.keyLen, header.dataLen);
}
}
- public synchronized boolean isLoaded() {
- return loaded;
- }
-
- public synchronized void setLoaded(boolean loaded) {
- this.loaded = loaded;
- }
-
- public synchronized int compareTo(Object o) {
- return (int) (pageNum - ((Page) o).pageNum);
+ // No synchronization: pageNum is final.
+ public int compareTo(Object o) {
+ return (int) (this.pageNum.longValue() - ((Page) o).pageNum.longValue());
}
}
}
-
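
Finally, the descriptor-pool changes in Paged are easiest to read as a
standalone pattern: the Stack doubles as both the pool and the monitor, so
putDescriptor's notify() wakes exactly the threads blocked in getDescriptor's
wait(). A self-contained sketch under that reading -- an illustrative class,
not the committed Paged:

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.util.EmptyStackException;
    import java.util.Stack;

    final class DescriptorPool {
        private static final int MAX_DESCRIPTORS = 16;
        private final Stack descriptors = new Stack();
        private int descCount = 0;   // pooled + in-use descriptors
        private final File file;

        DescriptorPool(File file) {
            this.file = file;
        }

        RandomAccessFile get() throws IOException {
            synchronized (descriptors) {
                if (!descriptors.empty()) {
                    return (RandomAccessFile) descriptors.pop();
                }
                if (descCount < MAX_DESCRIPTORS) {
                    descCount++;
                    return new RandomAccessFile(file, "rw");
                }
                // All descriptors are in use: block until one is returned.
                while (true) {
                    try {
                        descriptors.wait();
                        return (RandomAccessFile) descriptors.pop();
                    } catch (InterruptedException e) {
                        // ignore and keep waiting, as the commit does
                    } catch (EmptyStackException e) {
                        // another waiter won the race; keep waiting
                    }
                }
            }
        }

        void put(RandomAccessFile raf) {
            if (raf != null) {
                synchronized (descriptors) {
                    descriptors.push(raf);
                    descriptors.notify();   // wake one blocked get()
                }
            }
        }
    }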