All the known heavy code blocks are measured (except object database
access). This should help identify whether an optimization is effective
or not. An unoptimized git-status gives something like the output below
(92% of the time is accounted for). To sum up the ongoing efforts to
make git scale better:

 - "read cache" is being addressed by index-helper
 - "preload/refresh index" by watchman
 - "read packed refs" by the lmdb backend
 - "diff-index" by rebuilding the cache-tree
 - "read directory" by the untracked cache and watchman
 - "write index" by the split index
 - "name hash" potentially by index-helper

read-cache.c:2075         performance: 0.004058570 s: read cache .../index
preload-index.c:104       performance: 0.004419864 s: preload index
read-cache.c:1265         performance: 0.000185224 s: refresh index
refs/files-backend.c:1100 performance: 0.001935788 s: read packed refs
diff-lib.c:240            performance: 0.000144132 s: diff-files
diff-lib.c:506            performance: 0.013592000 s: diff-index
name-hash.c:128           performance: 0.000614177 s: initialize name hash
dir.c:2030                performance: 0.015814103 s: read directory
read-cache.c:2565         performance: 0.004052343 s: write index, changed mask = 2
trace.c:420               performance: 0.048365509 s: git command: './git' 'status'
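
Each probe added by this patch follows the same pattern; a minimal
sketch (the label string here is only a placeholder, not one used by
the patch):

    uint64_t start = getnanotime();

    /* ... the heavy code block being measured ... */

    trace_performance_since(start, "label for this block");

The timings are printed when the GIT_TRACE_PERFORMANCE environment
variable is set, e.g.

    GIT_TRACE_PERFORMANCE=1 ./git status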

Signed-off-by: Nguyễn Thái Ngọc Duy <pclo...@gmail.com>
---
 This is worth merging, I think.

 diff-lib.c           |  4 ++++
 dir.c                |  2 ++
 name-hash.c          |  2 ++
 preload-index.c      |  2 ++
 read-cache.c         | 11 +++++++++++
 refs/files-backend.c |  2 ++
 6 files changed, 23 insertions(+)

diff --git a/diff-lib.c b/diff-lib.c
index bc49c70..7af7f9a 100644
--- a/diff-lib.c
+++ b/diff-lib.c
@@ -90,6 +90,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
        int diff_unmerged_stage = revs->max_count;
        unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED)
                              ? CE_MATCH_RACY_IS_DIRTY : 0);
+       uint64_t start = getnanotime();
 
        diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/");
 
@@ -236,6 +237,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
        }
        diffcore_std(&revs->diffopt);
        diff_flush(&revs->diffopt);
+       trace_performance_since(start, "diff-files");
        return 0;
 }
 
@@ -491,6 +493,7 @@ static int diff_cache(struct rev_info *revs,
 int run_diff_index(struct rev_info *revs, int cached)
 {
        struct object_array_entry *ent;
+       uint64_t start = getnanotime();
 
        ent = revs->pending.objects;
        if (diff_cache(revs, ent->item->oid.hash, ent->name, cached))
@@ -500,6 +503,7 @@ int run_diff_index(struct rev_info *revs, int cached)
        diffcore_fix_diff_index(&revs->diffopt);
        diffcore_std(&revs->diffopt);
        diff_flush(&revs->diffopt);
+       trace_performance_since(start, "diff-index");
        return 0;
 }
 
diff --git a/dir.c b/dir.c
index 91003e5..1ebf9fe 100644
--- a/dir.c
+++ b/dir.c
@@ -1991,6 +1991,7 @@ int read_directory(struct dir_struct *dir, const char *path, int len, const stru
 {
        struct path_simplify *simplify;
        struct untracked_cache_dir *untracked;
+       uint64_t start = getnanotime();
 
        /*
         * Check out create_simplify()
@@ -2026,6 +2027,7 @@ int read_directory(struct dir_struct *dir, const char *path, int len, const stru
        free_simplify(simplify);
        qsort(dir->entries, dir->nr, sizeof(struct dir_entry *), cmp_name);
        qsort(dir->ignored, dir->ignored_nr, sizeof(struct dir_entry *), cmp_name);
+       trace_performance_since(start, "read directory %.*s", len, path);
        if (dir->untracked) {
                static struct trace_key trace_untracked_stats = TRACE_KEY_INIT(UNTRACKED_STATS);
                trace_printf_key(&trace_untracked_stats,
diff --git a/name-hash.c b/name-hash.c
index 6d9f23e..b3966d8 100644
--- a/name-hash.c
+++ b/name-hash.c
@@ -115,6 +115,7 @@ static int cache_entry_cmp(const struct cache_entry *ce1,
 static void lazy_init_name_hash(struct index_state *istate)
 {
        int nr;
+       uint64_t start = getnanotime();
 
        if (istate->name_hash_initialized)
                return;
@@ -124,6 +125,7 @@ static void lazy_init_name_hash(struct index_state *istate)
        for (nr = 0; nr < istate->cache_nr; nr++)
                hash_index_entry(istate, istate->cache[nr]);
        istate->name_hash_initialized = 1;
+       trace_performance_since(start, "initialize name hash");
 }
 
 void add_name_hash(struct index_state *istate, struct cache_entry *ce)
diff --git a/preload-index.c b/preload-index.c
index c1fe3a3..7bb4809 100644
--- a/preload-index.c
+++ b/preload-index.c
@@ -72,6 +72,7 @@ static void preload_index(struct index_state *index,
 {
        int threads, i, work, offset;
        struct thread_data data[MAX_PARALLEL];
+       uint64_t start = getnanotime();
 
        if (!core_preload_index)
                return;
@@ -100,6 +101,7 @@ static void preload_index(struct index_state *index,
                if (pthread_join(p->pthread, NULL))
                        die("unable to join threaded lstat");
        }
+       trace_performance_since(start, "preload index");
 }
 #endif
 
diff --git a/read-cache.c b/read-cache.c
index a73487e..3a79581 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -1187,6 +1187,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
        const char *typechange_fmt;
        const char *added_fmt;
        const char *unmerged_fmt;
+       uint64_t start = getnanotime();
 
        modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
        deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
@@ -1261,6 +1262,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
 
                replace_index_entry(istate, i, new);
        }
+       trace_performance_since(start, "refresh index");
        return has_errors;
 }
 
@@ -2062,12 +2064,15 @@ int read_index_from(struct index_state *istate, const char *path)
 {
        struct split_index *split_index;
        int ret;
+       uint64_t start;
 
        /* istate->initialized covers both .git/index and .git/sharedindex.xxx */
        if (istate->initialized)
                return istate->cache_nr;
 
+       start = getnanotime();
        ret = do_read_index(istate, path, 0);
+       trace_performance_since(start, "read cache %s", path);
 
        split_index = istate->split_index;
        if (!split_index || is_null_sha1(split_index->base_sha1)) {
@@ -2082,6 +2087,7 @@ int read_index_from(struct index_state *istate, const char *path)
        split_index->base->keep_mmap = istate->keep_mmap;
        split_index->base->to_shm    = istate->to_shm;
        split_index->base->from_shm  = istate->from_shm;
+       start = getnanotime();
        ret = do_read_index(split_index->base,
                            git_path("sharedindex.%s",
                                     sha1_to_hex(split_index->base_sha1)), 1);
@@ -2093,6 +2099,9 @@ int read_index_from(struct index_state *istate, const char *path)
                    sha1_to_hex(split_index->base->sha1));
        merge_base_index(istate);
        post_read_index_from(istate);
+       trace_performance_since(start, "read cache %s",
+                               git_path("sharedindex.%s",
+                                        sha1_to_hex(split_index->base_sha1)));
 
 done:
        if (ret > 0 && istate->from_shm && istate->last_update)
@@ -2437,6 +2446,7 @@ static int do_write_index(struct index_state *istate, int newfd,
        int entries = istate->cache_nr;
        struct stat st;
        struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
+       uint64_t start = getnanotime();
 
        for (i = removed = extended = 0; i < entries; i++) {
                if (cache[i]->ce_flags & CE_REMOVE)
@@ -2552,6 +2562,7 @@ static int do_write_index(struct index_state *istate, int newfd,
                return -1;
        istate->timestamp.sec = (unsigned int)st.st_mtime;
        istate->timestamp.nsec = ST_MTIME_NSEC(st);
+       trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
        return 0;
 }
 
diff --git a/refs/files-backend.c b/refs/files-backend.c
index ea78ce9..b88913f 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -1048,6 +1048,7 @@ static void read_packed_refs(FILE *f, struct ref_dir *dir)
        struct ref_entry *last = NULL;
        struct strbuf line = STRBUF_INIT;
        enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled = PEELED_NONE;
+       uint64_t start = getnanotime();
 
        while (strbuf_getwholeline(&line, f, '\n') != EOF) {
                unsigned char sha1[20];
@@ -1096,6 +1097,7 @@ static void read_packed_refs(FILE *f, struct ref_dir *dir)
        }
 
        strbuf_release(&line);
+       trace_performance_since(start, "read packed refs");
 }
 
 /*
-- 
2.8.0.rc0.210.gd302cd2
