jenkins-bot has submitted this change and it was merged. ( 
https://gerrit.wikimedia.org/r/351212 )

Change subject: New: record physical and logical file size of saved pages
......................................................................


New: record physical and logical file size of saved pages

Bug: T160469
Change-Id: I4b021e7a9b43f2891e5b5d520928bafca536a68d
---
M app/src/main/java/okhttp3/CacheDelegate.java
M app/src/main/java/org/wikipedia/database/Database.java
M app/src/main/java/org/wikipedia/database/contract/ReadingListPageContract.java
A app/src/main/java/org/wikipedia/dataclient/okhttp/cache/DiskLruCacheUtil.java
M app/src/main/java/org/wikipedia/readinglist/page/ReadingListPageRow.java
M 
app/src/main/java/org/wikipedia/readinglist/page/database/ReadingListPageTable.java
M app/src/main/java/org/wikipedia/savedpages/SavedPageSyncService.java
M app/src/main/java/org/wikipedia/util/FileUtil.java
M app/src/test/java/okhttp3/internal/cache/CacheDelegateInterceptorTest.java
9 files changed, 360 insertions(+), 36 deletions(-)

Approvals:
  Dbrant: Looks good to me, approved
  jenkins-bot: Verified
  Mholloway: Looks good to me, but someone else must approve



diff --git a/app/src/main/java/okhttp3/CacheDelegate.java 
b/app/src/main/java/okhttp3/CacheDelegate.java
index f696b21..607c3ec 100644
--- a/app/src/main/java/okhttp3/CacheDelegate.java
+++ b/app/src/main/java/okhttp3/CacheDelegate.java
@@ -1,6 +1,7 @@
 package okhttp3;
 
 import android.support.annotation.NonNull;
+import android.support.annotation.Nullable;
 
 import java.io.IOException;
 
@@ -20,6 +21,18 @@
         this.cache = cache;
     }
 
+    @NonNull public DiskLruCache diskLruCache() {
+        return cache.cache;
+    }
+
+    @Nullable public DiskLruCache.Snapshot entry(@NonNull Request req) {
+        try {
+            return cache.cache.get(key(req.url().toString()));
+        } catch (IOException ignore) {
+            return null;
+        }
+    }
+
     // Copy of Cache.get(). Calling this method modifies the Cache. If the URL 
is present, its
     // cache entry is moved to the head of the LRU queue. This method performs 
file I/O.
     public boolean isCached(@NonNull String url) {
diff --git a/app/src/main/java/org/wikipedia/database/Database.java 
b/app/src/main/java/org/wikipedia/database/Database.java
index ac45469..611fbc9 100644
--- a/app/src/main/java/org/wikipedia/database/Database.java
+++ b/app/src/main/java/org/wikipedia/database/Database.java
@@ -17,7 +17,7 @@
 
 public class Database extends SQLiteOpenHelper {
     private static final String DATABASE_NAME = "wikipedia.db";
-    private static final int DATABASE_VERSION = 16;
+    private static final int DATABASE_VERSION = 17;
 
     private final DatabaseTable<?>[] databaseTables = {
             HistoryEntry.DATABASE_TABLE,
diff --git 
a/app/src/main/java/org/wikipedia/database/contract/ReadingListPageContract.java
 
b/app/src/main/java/org/wikipedia/database/contract/ReadingListPageContract.java
index 58b3a45..e950253 100644
--- 
a/app/src/main/java/org/wikipedia/database/contract/ReadingListPageContract.java
+++ 
b/app/src/main/java/org/wikipedia/database/contract/ReadingListPageContract.java
@@ -60,11 +60,38 @@
         StrColumn THUMBNAIL_URL = new StrColumn(TABLE_PAGE, "thumbnailUrl", 
"text");
         StrColumn DESCRIPTION = new StrColumn(TABLE_PAGE, "description", 
"text");
 
+        // The cumulative size in bytes for an offline page and all page 
resources downloaded by
+        // SavedPageSyncService. Null or 0 if DiskStatus.ONLINE, not yet 
downloaded, or last
+        // downloaded before these columns were added. Outdated if the saved 
page cache size is later
+        // exceeded and resources are evicted. Written to by 
SavedPageSyncService.
+        // Android appears to present the user with logical size in app 
settings so it is the
+        // preferred metric to display and physical size will likely never be 
used. Since quantities
+        // are aggregated across files, neither can be derived from the other.
+        // wc -c /data/data/org.wikipedia.dev/files/okhttp-cache/*.[0-9]|tail 
-n1
+        // stat -c %s /data/data/org.wikipedia.dev/files/okhttp-cache/*.[0-9]
+        LongColumn PHYSICAL_SIZE = new LongColumn(TABLE_PAGE, "physicalSize", 
"integer");
+        // du -c /data/data/org.wikipedia.dev/files/okhttp-cache/*.[0-9]|tail 
-n1
+        // Block size: stat -c %B 
/data/data/org.wikipedia.dev/files/okhttp-cache
+        LongColumn LOGICAL_SIZE = new LongColumn(TABLE_PAGE, "logicalSize", 
"integer");
+
+        // Example:
+        // 1 Download the Obama article.
+        //   - Physical size recorded by us is 5 729 692 bytes.
+        //   - Logical size recorded by us is 6 754 304 bytes (6 596 
kibibytes).
+        // 2 Terminate the app and check the sizes (note: journal size is 
never included):
+        //   - Physical: wc -c 
/data/data/org.wikipedia.dev/files/okhttp-cache/*.[0-9]|tail -n1 => 5 729 692 
bytes.
+        //   - Logical: du -c 
/data/data/org.wikipedia.dev/files/okhttp-cache/*.[0-9]|tail -n1 => 6 596 
kibibytes.
+        //   - The size of "data" is about 6 868 kibibytes (6.7070313 
mebibytes):
+        //     - Calculate the size of all data: du -c 
/data/data/org.wikipedia.dev|tail -n1 => 13 736 kibibytes.
+        //     - Subtract the size of the cache: du -c 
/data/data/org.wikipedia.dev/cache|tail -n1 => 6 868 kibibytes.
+        // 3 Open settings: data size is 6.71 mebibytes.
+        // 4 Dump the database records: sqlite3 
/data/data/org.wikipedia.dev/databases/wikipedia.db '.dump readinglistpage'
+
         String[] SELECTION = DbUtil.qualifiedNames(KEY);
         String[] ALL = DbUtil.qualifiedNames(ID, KEY, LIST_KEYS, SITE, LANG, 
NAMESPACE, TITLE,
-                DISK_PAGE_REV, MTIME, ATIME, THUMBNAIL_URL, DESCRIPTION);
+                DISK_PAGE_REV, MTIME, ATIME, THUMBNAIL_URL, DESCRIPTION, 
PHYSICAL_SIZE, LOGICAL_SIZE);
         String[] CONTENT = DbUtil.qualifiedNames(KEY, LIST_KEYS, SITE, LANG, 
NAMESPACE, TITLE,
-                DISK_PAGE_REV, MTIME, ATIME, THUMBNAIL_URL, DESCRIPTION);
+                DISK_PAGE_REV, MTIME, ATIME, THUMBNAIL_URL, DESCRIPTION, 
PHYSICAL_SIZE, LOGICAL_SIZE);
     }
 
     public static final HttpColumns<ReadingListPageRow> HTTP_COLS = new 
HttpColumns<>(TABLE_HTTP);
diff --git 
a/app/src/main/java/org/wikipedia/dataclient/okhttp/cache/DiskLruCacheUtil.java 
b/app/src/main/java/org/wikipedia/dataclient/okhttp/cache/DiskLruCacheUtil.java
new file mode 100644
index 0000000..e6a5133
--- /dev/null
+++ 
b/app/src/main/java/org/wikipedia/dataclient/okhttp/cache/DiskLruCacheUtil.java
@@ -0,0 +1,34 @@
+package org.wikipedia.dataclient.okhttp.cache;
+
+import android.support.annotation.Nullable;
+
+import okhttp3.internal.cache.DiskLruCache;
+
+public final class DiskLruCacheUtil {
+    // DiskLruCache.valueCount is the number of files used per cache entry and
+    // DiskLruCache.Snapshot. For OkHttp, the value is two. The first file is 
metadata (headers,
+    // certificate, ...) and often ~8 KiB on disk. The second file is the raw 
response body which is
+    // preserved as it was received, with byte size equal to the 
Content-Length header when positive.
+    private static final int OKHTTP_METADATA_FILE_INDEX = 0;
+    private static final int OKHTTP_RAW_BODY_FILE_INDEX = 1;
+
+    /** @return The response metadata size in bytes. */
+    public static long okHttpResponseMetadataSize(@Nullable 
DiskLruCache.Snapshot snapshot) {
+        if (snapshot == null) {
+            return 0;
+        }
+
+        return snapshot.getLength(OKHTTP_METADATA_FILE_INDEX);
+    }
+
+    /** @return The response body size in bytes. */
+    public static long okHttpResponseBodySize(@Nullable DiskLruCache.Snapshot 
snapshot) {
+        if (snapshot == null) {
+            return 0;
+        }
+
+        return snapshot.getLength(OKHTTP_RAW_BODY_FILE_INDEX);
+    }
+
+    private DiskLruCacheUtil() { }
+}
diff --git 
a/app/src/main/java/org/wikipedia/readinglist/page/ReadingListPageRow.java 
b/app/src/main/java/org/wikipedia/readinglist/page/ReadingListPageRow.java
index 40911e4..195b9d4 100644
--- a/app/src/main/java/org/wikipedia/readinglist/page/ReadingListPageRow.java
+++ b/app/src/main/java/org/wikipedia/readinglist/page/ReadingListPageRow.java
@@ -34,6 +34,16 @@
     @Nullable private String thumbnailUrl;
     @Nullable private String description;
 
+    // The size in bytes for an offline page and all page resources downloaded 
by
+    // SavedPageSyncService. Null or 0 if DiskStatus.ONLINE or not yet 
downloaded. Outdated if
+    // the saved page cache size is later exceeded and resources are evicted. 
Written to by
+    // SavedPageSyncService.
+    // @see ReadingListPageContract.PageCol.PHYSICAL_SIZE
+    // @see ReadingListPageContract.PageCol.LOGICAL_SIZE
+    @Nullable private final Long physicalSize;
+    // The size on disk in bytes.
+    @Nullable private final Long logicalSize;
+
     public static Builder<?> builder() {
         //noinspection rawtypes
         return new Builder();
@@ -102,6 +112,14 @@
         return description;
     }
 
+    @Nullable public Long physicalSize() {
+        return physicalSize;
+    }
+
+    @Nullable public Long logicalSize() {
+        return logicalSize;
+    }
+
     protected ReadingListPageRow(@NonNull Builder<?> builder) {
         key = builder.key;
         listKeys = new ArraySet<>(builder.listKeys);
@@ -113,6 +131,8 @@
         atime = builder.atime;
         thumbnailUrl = builder.thumbnailUrl;
         description = builder.description;
+        physicalSize = builder.physicalSize;
+        logicalSize = builder.logicalSize;
     }
 
     @SuppressWarnings("unchecked")
@@ -127,6 +147,8 @@
         private Long atime;
         private String thumbnailUrl;
         private String description;
+        private Long physicalSize;
+        private Long logicalSize;
 
         public Clazz copy(@NonNull ReadingListPageRow copy) {
             return key(copy.key)
@@ -137,7 +159,9 @@
                     .mtime(copy.mtime)
                     .atime(copy.atime)
                     .thumbnailUrl(copy.thumbnailUrl)
-                    .description(copy.description);
+                    .description(copy.description)
+                    .physicalSize(copy.physicalSize)
+                    .logicalSize(copy.logicalSize);
         }
 
         public Clazz key(@NonNull String key) {
@@ -196,6 +220,16 @@
             return (Clazz) this;
         }
 
+        public Clazz physicalSize(@Nullable Long physicalSize) {
+            this.physicalSize = physicalSize;
+            return (Clazz) this;
+        }
+
+        public Clazz logicalSize(@Nullable Long logicalSize) {
+            this.logicalSize = logicalSize;
+            return (Clazz) this;
+        }
+
         public ReadingListPageRow build() {
             validate();
             return new ReadingListPageRow(this);
diff --git 
a/app/src/main/java/org/wikipedia/readinglist/page/database/ReadingListPageTable.java
 
b/app/src/main/java/org/wikipedia/readinglist/page/database/ReadingListPageTable.java
index b109cb3..a80a9b0 100644
--- 
a/app/src/main/java/org/wikipedia/readinglist/page/database/ReadingListPageTable.java
+++ 
b/app/src/main/java/org/wikipedia/readinglist/page/database/ReadingListPageTable.java
@@ -16,6 +16,7 @@
 
 public class ReadingListPageTable extends DatabaseTable<ReadingListPageRow> {
     private static final int DB_VER_INTRODUCED = 12;
+    private static final int DB_VER_SIZE_ADDED = 17;
 
     public ReadingListPageTable() {
         super(ReadingListPageContract.TABLE_PAGE, 
ReadingListPageContract.Page.URI);
@@ -36,6 +37,8 @@
                 .atime(PageCol.ATIME.val(cursor))
                 .thumbnailUrl(PageCol.THUMBNAIL_URL.val(cursor))
                 .description(PageCol.DESCRIPTION.val(cursor))
+                .physicalSize(PageCol.PHYSICAL_SIZE.val(cursor))
+                .logicalSize(PageCol.LOGICAL_SIZE.val(cursor))
                 .build();
     }
 
@@ -56,6 +59,8 @@
                 cols.add(PageCol.THUMBNAIL_URL);
                 cols.add(PageCol.DESCRIPTION);
                 return cols.toArray(new Column<?>[cols.size()]);
+            case DB_VER_SIZE_ADDED:
+                return new Column<?>[]{PageCol.PHYSICAL_SIZE, 
PageCol.LOGICAL_SIZE};
             default:
                 return super.getColumnsAdded(version);
         }
@@ -74,6 +79,8 @@
         contentValues.put(PageCol.ATIME.getName(), row.atime());
         contentValues.put(PageCol.THUMBNAIL_URL.getName(), row.thumbnailUrl());
         contentValues.put(PageCol.DESCRIPTION.getName(), row.description());
+        contentValues.put(PageCol.PHYSICAL_SIZE.getName(), row.physicalSize());
+        contentValues.put(PageCol.LOGICAL_SIZE.getName(), row.logicalSize());
         return contentValues;
     }
 
diff --git 
a/app/src/main/java/org/wikipedia/savedpages/SavedPageSyncService.java 
b/app/src/main/java/org/wikipedia/savedpages/SavedPageSyncService.java
index 48f1162..ca6ccc8 100644
--- a/app/src/main/java/org/wikipedia/savedpages/SavedPageSyncService.java
+++ b/app/src/main/java/org/wikipedia/savedpages/SavedPageSyncService.java
@@ -5,10 +5,10 @@
 import android.support.annotation.NonNull;
 import android.support.annotation.Nullable;
 
-import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.wikipedia.WikipediaApp;
 import org.wikipedia.dataclient.WikiSite;
 import org.wikipedia.dataclient.okhttp.OkHttpConnectionFactory;
+import org.wikipedia.dataclient.okhttp.cache.DiskLruCacheUtil;
 import org.wikipedia.dataclient.okhttp.cache.SaveHeader;
 import org.wikipedia.dataclient.page.PageClient;
 import org.wikipedia.dataclient.page.PageClientFactory;
@@ -21,17 +21,22 @@
 import org.wikipedia.readinglist.page.database.ReadingListPageDao;
 import org.wikipedia.readinglist.page.database.disk.ReadingListPageDiskRow;
 import org.wikipedia.util.DimenUtil;
+import org.wikipedia.util.FileUtil;
 import org.wikipedia.util.UriUtil;
 import org.wikipedia.util.log.L;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import okhttp3.CacheControl;
 import okhttp3.CacheDelegate;
 import okhttp3.Request;
+import okhttp3.Response;
+import okhttp3.internal.cache.DiskLruCache;
 import retrofit2.Call;
 
 import static 
org.wikipedia.dataclient.okhttp.OkHttpConnectionFactory.SAVE_CACHE;
@@ -41,10 +46,12 @@
     @NonNull private final CacheDelegate cacheDelegate = new 
CacheDelegate(SAVE_CACHE);
     @NonNull private final PageImageUrlParser pageImageUrlParser
             = new PageImageUrlParser(new ImageTagParser(), new 
PixelDensityDescriptorParser());
+    private long blockSize;
 
     public SavedPageSyncService() {
         super("SavedPageSyncService");
         dao = ReadingListPageDao.instance();
+        blockSize = 
FileUtil.blockSize(cacheDelegate.diskLruCache().getDirectory());
     }
 
     @Override protected void onHandleIntent(@Nullable Intent intent) {
@@ -109,41 +116,48 @@
     private void saveNewEntries(List<ReadingListPageDiskRow> queue) {
         while (!queue.isEmpty()) {
             ReadingListPageDiskRow row = queue.get(0);
-            boolean ok = savePageFor(row);
-            if (!ok) {
+            PageTitle pageTitle = makeTitleFrom(row);
+            if (pageTitle == null) {
+                // todo: won't this fail forever or until the page is marked 
unsaved / removed somehow?
                 dao.failDiskTransaction(queue);
                 break;
             }
-            dao.completeDiskTransaction(row);
+
+            AggregatedResponseSize size;
+            try {
+                size = savePageFor(pageTitle);
+            } catch (IOException e) {
+                dao.failDiskTransaction(queue);
+                break;
+            }
+
+            ReadingListPageDiskRow rowWithUpdatedSize = new 
ReadingListPageDiskRow(row,
+                    
ReadingListPageRow.builder().copy(row.dat()).logicalSize(size.logicalSize()).physicalSize(size.physicalSize()).build());
+            dao.completeDiskTransaction(rowWithUpdatedSize);
             queue.remove(row);
         }
     }
 
-    private boolean savePageFor(@NonNull ReadingListPageDiskRow row) {
-        PageTitle pageTitle = makeTitleFrom(row);
-        if (pageTitle == null) {
-            return false;
-        }
+    @NonNull private AggregatedResponseSize savePageFor(@NonNull PageTitle 
pageTitle) throws IOException {
+        AggregatedResponseSize size = new AggregatedResponseSize(0, 0, 0);
+
+        Call<PageLead> leadCall = reqPageLead(null, pageTitle);
+        Call<PageRemaining> sectionsCall = reqPageSections(null, pageTitle);
+
+        retrofit2.Response<PageLead> leadRsp = leadCall.execute();
+        size = size.add(responseSize(leadRsp));
+        retrofit2.Response<PageRemaining> sectionsRsp = sectionsCall.execute();
+        size = size.add(responseSize(sectionsRsp));
+
+        Set<String> imageUrls = new 
HashSet<>(pageImageUrlParser.parse(leadRsp.body()));
+        imageUrls.addAll(pageImageUrlParser.parse(sectionsRsp.body()));
+
+        size = size.add(reqSaveImage(pageTitle.getWikiSite(), imageUrls));
 
         String title = pageTitle.getPrefixedText();
-        ImmutablePair<PageLead, PageRemaining> page;
-        try {
-            page = reqPage(null, pageTitle);
-            reqSaveImage(pageTitle.getWikiSite(), 
pageImageUrlParser.parse(page.getLeft()));
-            reqSaveImage(pageTitle.getWikiSite(), 
pageImageUrlParser.parse(page.getRight()));
-        } catch (IOException e) {
-            L.e("Failed to save page " + title, e);
-            return false;
-        }
-        L.i("Saved page " + title);
-        return true;
-    }
+        L.i("Saved page " + title + " (" + size + ")");
 
-    @NonNull private ImmutablePair<PageLead, PageRemaining> reqPage(@Nullable 
CacheControl cacheControl,
-                                                                    @NonNull 
PageTitle pageTitle) throws IOException {
-        PageLead lead = reqPageLead(cacheControl, pageTitle).execute().body();
-        PageRemaining sections = reqPageSections(cacheControl, 
pageTitle).execute().body();
-        return new ImmutablePair<>(lead, sections);
+        return size;
     }
 
     @NonNull private Call<PageLead> reqPageLead(@Nullable CacheControl 
cacheControl,
@@ -169,18 +183,25 @@
         return client.sections(cacheControl, cacheOption, title, noImages);
     }
 
-    private void reqSaveImage(@NonNull WikiSite wiki, @NonNull List<String> 
urls) throws IOException {
+    private AggregatedResponseSize reqSaveImage(@NonNull WikiSite wiki, 
@NonNull Iterable<String> urls) throws IOException {
+        AggregatedResponseSize size = new AggregatedResponseSize(0, 0, 0);
         for (String url : urls) {
-            reqSaveImage(wiki, url);
+            size = size.add(reqSaveImage(wiki, url));
         }
+        return size;
     }
 
-    private void reqSaveImage(@NonNull WikiSite wiki, @NonNull String url) 
throws IOException {
+    @NonNull private ResponseSize reqSaveImage(@NonNull WikiSite wiki, 
@NonNull String url) throws IOException {
         Request request = saveImageReq(wiki, url);
+
+        Response rsp = 
OkHttpConnectionFactory.getClient().newCall(request).execute();
 
         // Note: raw non-Retrofit usage of OkHttp Requests requires that the 
Response body is read
         // for the cache to be written.
-        
OkHttpConnectionFactory.getClient().newCall(request).execute().body().close();
+        rsp.body().close();
+
+        // Size must be checked after the body has been written.
+        return responseSize(rsp);
     }
 
     @NonNull private Request saveImageReq(@NonNull WikiSite wiki, @NonNull 
String url) {
@@ -189,6 +210,24 @@
                 .addHeader(SaveHeader.FIELD, SaveHeader.VAL_ENABLED)
                 .url(UriUtil.resolveProtocolRelativeUrl(wiki, url))
                 .build();
+    }
+
+    @NonNull private ResponseSize responseSize(@NonNull Response rsp) {
+        return responseSize(rsp.request());
+    }
+
+    @NonNull private ResponseSize responseSize(@NonNull retrofit2.Response 
rsp) {
+        return responseSize(rsp.raw().request());
+    }
+
+    @NonNull private ResponseSize responseSize(@NonNull Request req) {
+        return responseSize(cacheDelegate.entry(req));
+    }
+
+    @NonNull private ResponseSize responseSize(@Nullable DiskLruCache.Snapshot 
snapshot) {
+        long metadataSize = 
DiskLruCacheUtil.okHttpResponseMetadataSize(snapshot);
+        long bodySize = DiskLruCacheUtil.okHttpResponseBodySize(snapshot);
+        return new ResponseSize(metadataSize, bodySize);
     }
 
     @Nullable private PageTitle makeTitleFrom(@NonNull ReadingListPageDiskRow 
row) {
@@ -203,4 +242,67 @@
     @NonNull private PageClient newPageClient(@NonNull PageTitle title) {
         return PageClientFactory.create(title.getWikiSite(), 
title.namespace());
     }
+
+    private static class AggregatedResponseSize {
+        private final long physicalSize;
+        private final long logicalSize;
+        private final int responsesAggregated;
+
+        AggregatedResponseSize(long physicalSize, long logicalSize, int 
responsesAggregated) {
+            this.physicalSize = physicalSize;
+            this.logicalSize = logicalSize;
+            this.responsesAggregated = responsesAggregated;
+        }
+
+        @Override public String toString() {
+            return "responses=" + responsesAggregated() + " physical=" + 
physicalSize() + "B logical=" + logicalSize() + "B";
+        }
+
+        long physicalSize() {
+            return physicalSize;
+        }
+
+        // The size on disk.
+        long logicalSize() {
+            return logicalSize;
+        }
+
+        int responsesAggregated() {
+            return responsesAggregated;
+        }
+
+        @NonNull AggregatedResponseSize add(@NonNull ResponseSize size) {
+            return new AggregatedResponseSize(physicalSize + 
size.physicalSize(),
+                    logicalSize + size.logicalSize(), responsesAggregated() + 
1);
+        }
+
+        @NonNull AggregatedResponseSize add(@NonNull AggregatedResponseSize 
size) {
+            return new AggregatedResponseSize(physicalSize + 
size.physicalSize(),
+                    logicalSize + size.logicalSize(), responsesAggregated() + 
size.responsesAggregated());
+        }
+    }
+
+    private class ResponseSize {
+        private final long metadataSize;
+        private final long bodySize;
+
+        ResponseSize(long metadataSize, long bodySize) {
+            this.metadataSize = metadataSize;
+            this.bodySize = bodySize;
+        }
+
+        @Override public String toString() {
+            return "physical metadata=" + metadataSize + "B physical body=" + 
bodySize
+                    + "B physical=" + physicalSize() + "B logical=" + 
logicalSize() + "B";
+        }
+
+        long physicalSize() {
+            return metadataSize + bodySize;
+        }
+
+        long logicalSize() {
+            return FileUtil.physicalToLogicalSize(metadataSize, blockSize)
+                    + FileUtil.physicalToLogicalSize(bodySize, blockSize);
+        }
+     }
 }
diff --git a/app/src/main/java/org/wikipedia/util/FileUtil.java 
b/app/src/main/java/org/wikipedia/util/FileUtil.java
index b91b16d..e0e0ee2 100644
--- a/app/src/main/java/org/wikipedia/util/FileUtil.java
+++ b/app/src/main/java/org/wikipedia/util/FileUtil.java
@@ -1,6 +1,8 @@
 package org.wikipedia.util;
 
 import android.graphics.Bitmap;
+import android.os.Build;
+import android.os.StatFs;
 
 import java.io.BufferedReader;
 import java.io.ByteArrayOutputStream;
@@ -13,6 +15,26 @@
 public final class FileUtil {
     public static final int JPEG_QUALITY = 85;
 
+    public static long physicalToLogicalSize(long physical, long blockSize) {
+        if (physical == 0) {
+            return 0;
+        }
+
+        long minBlockSize = Math.max(1, blockSize);
+        return (physical / minBlockSize + Math.min(physical % minBlockSize, 
1)) * minBlockSize;
+    }
+
+    public static long blockSize(File file) {
+        StatFs statFs = new StatFs(file.getAbsolutePath());
+
+        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2) {
+            return statFs.getBlockSizeLong();
+        }
+
+        //noinspection deprecation
+        return statFs.getBlockSize();
+    }
+
     public static File writeToFile(ByteArrayOutputStream bytes, File 
destinationFile) throws IOException {
         FileOutputStream fo = new FileOutputStream(destinationFile);
         try {
diff --git 
a/app/src/test/java/okhttp3/internal/cache/CacheDelegateInterceptorTest.java 
b/app/src/test/java/okhttp3/internal/cache/CacheDelegateInterceptorTest.java
index b2a9c18..6636f85 100644
--- a/app/src/test/java/okhttp3/internal/cache/CacheDelegateInterceptorTest.java
+++ b/app/src/test/java/okhttp3/internal/cache/CacheDelegateInterceptorTest.java
@@ -1,7 +1,10 @@
 package okhttp3.internal.cache;
 
+import android.os.Build;
 import android.support.annotation.NonNull;
+import android.support.annotation.RequiresApi;
 
+import org.apache.commons.lang3.StringUtils;
 import org.junit.Before;
 import org.junit.Test;
 import org.wikipedia.dataclient.okhttp.HttpStatusException;
@@ -10,13 +13,25 @@
 import org.wikipedia.test.ImmediateExecutorService;
 import org.wikipedia.test.MockWebServerTest;
 
+import java.nio.charset.StandardCharsets;
+
 import okhttp3.CacheControl;
 import okhttp3.CacheDelegate;
 import okhttp3.Dispatcher;
 import okhttp3.Request;
+import okhttp3.mockwebserver.MockResponse;
+import okio.Buffer;
+import okio.GzipSink;
+import okio.Sink;
 
 import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.notNullValue;
+import static 
org.wikipedia.dataclient.okhttp.cache.DiskLruCacheUtil.okHttpResponseBodySize;
+import static 
org.wikipedia.dataclient.okhttp.cache.DiskLruCacheUtil.okHttpResponseMetadataSize;
 
 public class CacheDelegateInterceptorTest extends MockWebServerTest {
     private static final String URL = "url";
@@ -32,8 +47,8 @@
         saveCache.remove(req);
     }
 
-    // Sanity check that both caches are truly empty
-    @Test(expected = HttpStatusException.class) public void testAssumptions() 
throws Throwable {
+    // Both the network and saved cache are expected to be empty after each 
test's setUp().
+    @Test(expected = HttpStatusException.class) public void 
testAssumptionCacheIsEmptyAfterSetUp() throws Throwable {
         Request req = newOnlyIfCachedRequest();
 
         assertCached(netCache, req, false);
@@ -42,6 +57,76 @@
         executeRequest(req);
     }
 
+    // The size on disk of an empty body is expected to be zero.
+    @Test public void testAssumptionCacheSizeEmptyBody() throws Throwable {
+        Request req = newRequest();
+        requestResponse("", req);
+
+        DiskLruCache.Snapshot snapshot = netCache.entry(req);
+
+        assertThat(okHttpResponseBodySize(snapshot), is(0L));
+    }
+
+    // The size on disk of a nonempty body is expected to be nonzero.
+    @Test public void testAssumptionCacheSizeNonemptyBody() throws Throwable {
+        Request req = newRequest();
+        requestResponse("A", req);
+
+        DiskLruCache.Snapshot snapshot = netCache.entry(req);
+        assertThat(okHttpResponseBodySize(snapshot), is(1L));
+    }
+
+    // The size on disk of OkHttp metadata is expected to be nonzero.
+    @RequiresApi(api = Build.VERSION_CODES.KITKAT)
+    @Test public void testAssumptionCacheSizeMetadataIsNonzero() throws 
Throwable {
+        Request req = newRequest();
+        requestResponse("A", req);
+
+        DiskLruCache.Snapshot snapshot = netCache.entry(req);
+
+        // The size on disk of OkHttp metadata overhead is expected to be 
nonzero and necessary to
+        // consider when calculating disk usage for a page and all of its 
resources, so that more
+        // than just the Content-Length header need be considered for each 
resource response.
+        assertThat(okHttpResponseMetadataSize(snapshot), notNullValue());
+    }
+
+    // Although OkHttp decompresses gzipped service responses seamlessly, the 
cache is expected to
+    // persist them in compressed form and report the compressed size, not the 
decompressed size.
+    @RequiresApi(api = Build.VERSION_CODES.KITKAT)
+    @Test public void testAssumptionCacheSizeCompressedSizeIsReported() throws 
Throwable {
+        String interval = "0123456789"; // One cycle.
+        String body = StringUtils.repeat(interval, 100_000); // The body is 
many intervals.
+
+        Buffer buffer = new Buffer();
+        Sink sink = new GzipSink(buffer);
+        Buffer uncompressedBuffer = new Buffer().writeString(body, 
StandardCharsets.UTF_8);
+        long uncompressedSize = uncompressedBuffer.size();
+        sink.write(uncompressedBuffer, uncompressedBuffer.size());
+        sink.close();
+
+        // The compressed size is expected to be worse than one interval but 
at least 100x better
+        // than all intervals.
+        long compressedSize = buffer.size();
+        assertThat(compressedSize,
+                allOf(greaterThan((long) interval.length()), 
lessThan(uncompressedSize / 100L)));
+
+        // Enqueue a compressed response.
+        MockResponse serviceResponse = new MockResponse()
+                .addHeader("Content-Encoding", "gzip")
+                .setBody(buffer);
+        server().enqueue(serviceResponse);
+
+        Request req = newRequest();
+        String rsp = executeRequest(req);
+        server().takeRequest();
+        assertThat(rsp, is(body));
+
+        DiskLruCache.Snapshot snapshot = netCache.entry(req);
+
+        // The size on disk is expected to be the compressed size.
+        assertThat(okHttpResponseBodySize(snapshot), is(compressedSize));
+    }
+
     @Test public void testInterceptWriteNetCacheNoHeader() throws Throwable {
         Request req = newRequest();
         requestResponse("0", req);

-- 
To view, visit https://gerrit.wikimedia.org/r/351212
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings

Gerrit-MessageType: merged
Gerrit-Change-Id: I4b021e7a9b43f2891e5b5d520928bafca536a68d
Gerrit-PatchSet: 5
Gerrit-Project: apps/android/wikipedia
Gerrit-Branch: master
Gerrit-Owner: Niedzielski <sniedziel...@wikimedia.org>
Gerrit-Reviewer: Dbrant <dbr...@wikimedia.org>
Gerrit-Reviewer: Mholloway <mhollo...@wikimedia.org>
Gerrit-Reviewer: Niedzielski <sniedziel...@wikimedia.org>
Gerrit-Reviewer: Sniedzielski <sniedziel...@wikimedia.org>
Gerrit-Reviewer: jenkins-bot <>

_______________________________________________
MediaWiki-commits mailing list
MediaWiki-commits@lists.wikimedia.org
https://lists.wikimedia.org/mailman/listinfo/mediawiki-commits

Reply via email to