Sounds pretty good. About the potential issue you described: writing
unit tests for the class can go a long way toward catching potential
problems early. It might actually be worth it for such a critical
component in the FO tree area. Just a thought.
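
Something along these lines could be a starting point (a rough sketch
only -- assuming plain JUnit and a public fetch() that returns the
canonical instance for equal values; adjust the names to the real API):

    import junit.framework.TestCase;

    public class PropertyCacheTestCase extends TestCase {

        /* equal values should come back as the same instance */
        public void testFetchCanonicalizes() {
            PropertyCache cache = new PropertyCache();
            assertSame(cache.fetch(new String("12pt")),
                       cache.fetch(new String("12pt")));
        }

        /* hammering the cache from several threads should neither
         * corrupt it nor deadlock against the cleaner thread */
        public void testConcurrentFetch() throws Exception {
            final PropertyCache cache = new PropertyCache();
            Thread[] threads = new Thread[8];
            for (int i = 0; i < threads.length; i++) {
                threads[i] = new Thread(new Runnable() {
                    public void run() {
                        for (int j = 0; j < 10000; j++) {
                            cache.fetch(String.valueOf(j % 100));
                        }
                    }
                });
                threads[i].start();
            }
            for (int i = 0; i < threads.length; i++) {
                threads[i].join();
            }
        }
    }
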
Jeremias Maerki
On 15.11.2007 01:18:32 Andreas L Delmelle wrote:
>
> On Nov 14, 2007, at 21:38, Jeremias Maerki wrote:
>
> Hi Jeremias, Chris,
>
> > <jm-PropertyCache-MemLeak.diff.txt>
>
>
> My proposal, incorporating the changes in Jeremias' diff, below.
>
> To sum it up:
> Only one CacheCleaner per PropertyCache, and one accompanying thread.
> If, after a put(), cleanup seems to be needed *and* the thread is not
> alive, lock on the cleaner, and start the thread.
>
> If the thread is busy, I guess it suffices to continue, assuming
> that the hash distribution should eventually lead some later put()
> back to the same bucket/segment...?
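>
> In code, the put()-side trigger then boils down to roughly this
> (condensed from the diff below):
>
>     if (segment.count > (2 * table.length)
>             && !cleanerThread.isAlive()) {
>         synchronized (cleaner) {
>             cleaner.init(hash);
>             cleanerThread.start();
>         }
>     }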
>
> As Jeremias noted, rehash is currently called every time an attempt
> to clean up fails. Maybe this needs improvement... OTOH, rehash now
> becomes a bit simpler, since it no longer needs to take interfering
> cleaners into account. Only one cleaner remains: CacheCleaner.run()
> is now synchronized on the cleaner, and rehash() itself is done
> within the cleaner thread.
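>
> Schematically, run() now looks like this (only the locking structure
> is shown; see the diff below for the details):
>
>     public void run() {
>         synchronized (this) {          // only one cleanup at a time
>             synchronized (segment) {   // lock the affected segment
>                 // prune entries whose WeakReference was cleared
>             }
>             if (oldCount == newCount) {
>                 // cleanup had no effect, try rehashing
>                 rehash(SEGMENT_MASK);
>             }
>         }
>     }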
>
> What I see as a possible issue, though, is that there is a
> theoretical limit to rehash() having any effect at all. If the cache
> grows to 64 buckets, the maximum number of segments that exceed the
> threshold can never be greater than half the table size... This
> might be a non-issue, as it would only be triggered if the cache
> holds at least 2048 instances (not counting the elements in the
> buckets that don't exceed the threshold). No problem for enums and
> keeps. Strings and numbers, though?
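>
> To put numbers on that (just an illustration, assuming 16 segments,
> i.e. SEGMENT_MASK == 15): at 64 buckets the cleanup threshold is
> 2 * 64 = 128 entries per segment, so every segment exceeding it
> implies at least 16 * 128 = 2048 live instances.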
>
>
>
> Cheers
>
> Andreas
>
> Index: src/java/org/apache/fop/fo/properties/PropertyCache.java
> ===================================================================
> --- src/java/org/apache/fop/fo/properties/PropertyCache.java
> (revision 594306)
> +++ src/java/org/apache/fop/fo/properties/PropertyCache.java
> (working copy)
> @@ -41,6 +41,9 @@
> /** the table of hash-buckets */
> private CacheEntry[] table = new CacheEntry[8];
>
> + private CacheCleaner cleaner = new CacheCleaner();
> + private Thread cleanerThread = new Thread(cleaner);
> +
> /* same hash function as used by java.util.HashMap */
> private static int hash(Object x) {
> int h = x.hashCode();
> @@ -77,6 +80,10 @@
> this.hash = old.hash;
> }
>
> + boolean isCleared() {
> + return (ref == null || ref.get() == null);
> + }
> +
> }
>
> /* Wrapper objects to synchronize on */
> @@ -85,7 +92,7 @@
> }
>
> /*
> - * Class modeling a cleanup thread.
> + * Class modeling the cleanup thread.
> *
> * Once run() is called, the segment is locked and the hash-bucket
> * will be traversed, removing any obsolete entries.
> @@ -95,50 +102,51 @@
>
> private int hash;
>
> - CacheCleaner(int hash) {
> + CacheCleaner() {
> + }
> +
> + void init(int hash) {
> this.hash = hash;
> }
>
> public void run() {
> - //System.out.println("Cleaning segment " + this.segment);
> - CacheSegment segment = segments[this.hash & SEGMENT_MASK];
> - int oldCount;
> - int newCount;
> - synchronized (segment) {
> - oldCount = segment.count;
> - /* check first to see if another cleaner thread already
> -              * pushed the number of entries back below the threshold
> - * if so, return immediately
> - */
> - if (segment.count < (2 * table.length)) {
> - return;
> - }
> -
> - int index = this.hash & (table.length - 1);
> - CacheEntry first = table[index];
> - WeakReference ref;
> - for (CacheEntry e = first; e != null; e = e.next) {
> - ref = e.ref;
> - if (ref != null && ref.get() == null) {
> - /* remove obsolete entry
> -                         /* 1. clear value, cause interference for non-blocking get() */
> - e.ref = null;
> -
> -                         /* 2. clone the segment, without the obsolete entry */
> - CacheEntry head = e.next;
> -                         for (CacheEntry c = first; c != e; c = c.next) {
> - head = new CacheEntry(c, head);
> + synchronized (this) {
> +                //System.out.println("Cleaning segment " + this.segment);
> +                CacheSegment segment = segments[this.hash & SEGMENT_MASK];
> + int oldCount;
> + int newCount;
> + synchronized (segment) {
> + oldCount = segment.count;
> +
> + int index = this.hash & (table.length - 1);
> + CacheEntry first = table[index];
> + for (CacheEntry e = first; e != null; e = e.next) {
> + if (e.isCleared()) {
> + /* remove obsolete entry
> +                            /* 1. clear value, cause interference for non-blocking get() */
> + e.ref = null;
> +
> +                            /* 2. clone the segment, without the obsolete entry */
> + CacheEntry head = e.next;
> +                            for (CacheEntry c = first; c != e; c = c.next) {
> + if (!c.isCleared()) {
> + head = new CacheEntry(c, head);
> + } else {
> +                                    /* WeakReference cleared by the GC? */
> + segment.count--;
> + }
> + }
> + table[index] = head;
> + segment.count--;
> }
> - table[index] = head;
> - segment.count--;
> }
> + newCount = segment.count;
> }
> - newCount = segment.count;
> + if (oldCount == newCount) {
> + /* cleanup had no effect, try rehashing */
> + rehash(SEGMENT_MASK);
> + }
> }
> - if (oldCount == newCount) {
> - /* cleanup had no effect, try rehashing */
> - rehash(SEGMENT_MASK);
> - }
> }
> }
>
> @@ -174,11 +182,14 @@
> }
>
> if (segment.count > (2 * table.length)) {
> - /* launch cleanup in a separate thread,
> - * so it acquires its own lock, and put()
> - * can return immediately */
> - Thread cleaner = new Thread(new CacheCleaner(hash));
> - cleaner.start();
> + if (!cleanerThread.isAlive()) {
> +                        /* start cleanup thread, which acquires its own lock,
> + * so put() can return immediately */
> + synchronized (cleaner) {
> + cleaner.init(hash);
> + cleanerThread.start();
> + }
> + }
> }
> }
> }
> @@ -267,19 +278,15 @@
> CacheEntry[] newTable = new CacheEntry[newLength];
>
> int hash, idx;
> - WeakReference ref;
> Object o;
> newLength--;
> for (int i = table.length; --i >= 0;) {
>                 for (CacheEntry c = table[i]; c != null; c = c.next) {
> - ref = c.ref;
> - if (ref != null) {
> - if ((o = ref.get()) != null) {
> - hash = hash(o);
> - idx = hash & newLength;
> -                        newTable[idx] = new CacheEntry(c, newTable[idx]);
> -                        segments[hash & SEGMENT_MASK].count++;
> - }
> + if ((o = c.ref.get()) != null) {
> + hash = hash(o);
> + idx = hash & newLength;
> +                    newTable[idx] = new CacheEntry(c, newTable[idx]);
> + segments[hash & SEGMENT_MASK].count++;
> }
> }
> }
> @@ -296,6 +303,8 @@
> for (int i = SEGMENT_MASK + 1; --i >= 0;) {
> segments[i] = new CacheSegment();
> }
> + cleanerThread.setName("FOP PropertyCache Cleaner Thread");
> + cleanerThread.setDaemon(true);
> }
>
> /**