Author: mav
Date: Fri Mar 24 09:37:00 2017
New Revision: 315896
URL: https://svnweb.freebsd.org/changeset/base/315896

Log:
  MFV r315290, r315291: 7303 dynamic metaslab selection
  
  illumos/illumos-gate@8363e80ae72609660f6090766ca8c2c18aa53f0c
  
  https://github.com/illumos/illumos-gate/commit/8363e80ae72609660f6090766ca8c2c18aa53f0c
  
  https://www.illumos.org/issues/7303
  
    This change introduces a new weighting algorithm to improve metaslab
    selection. The new weighting algorithm relies on the SPACEMAP_HISTOGRAM
    feature. As a result,
    the metaslab weight now encodes the type of weighting algorithm used
    (size-based vs segment-based).
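
    For reference, the weight is a single 64-bit value: the top bits carry
    the activation state and the weighting type, and the remaining bits carry
    either the fragmentation-weighted free space (space-based) or the
    histogram bucket index plus a segment count (segment-based). The sketch
    below shows one plausible decoding; the bit positions are illustrative
    assumptions only, and the authoritative layout is the WEIGHT_* /
    METASLAB_WEIGHT_TYPE macros used in the metaslab.c hunks (defined in
    sys/metaslab_impl.h, which this change also modifies).

      #include <stdio.h>
      #include <stdint.h>

      /* Assumed bit positions, for illustration only. */
      #define EX_WEIGHT_PRIMARY    (1ULL << 63)  /* active as primary */
      #define EX_WEIGHT_SECONDARY  (1ULL << 62)  /* active as secondary */
      #define EX_WEIGHT_TYPE       (1ULL << 61)  /* set => space-based */

      static void
      ex_decode_weight(uint64_t w)
      {
              if (w == 0 || (w & EX_WEIGHT_TYPE)) {
                      /* Space-based: low bits are the weighted free space. */
                      uint64_t space = w & ~(EX_WEIGHT_PRIMARY |
                          EX_WEIGHT_SECONDARY | EX_WEIGHT_TYPE);
                      printf("space-based: %llu bytes (weighted)\n",
                          (unsigned long long)space);
              } else {
                      /* Segment-based: assumed 6-bit index, count below it. */
                      unsigned idx = (w >> 55) & 0x3f;
                      uint64_t count = w & ((1ULL << 55) - 1);
                      printf("segment-based: %llu segment(s) in the 2^%u bucket\n",
                          (unsigned long long)count, idx);
              }
      }

      int
      main(void)
      {
              /* "1 x 8M" from the trace output below: one segment, index 23. */
              uint64_t w = ((uint64_t)23 << 55) | 1ULL;

              ex_decode_weight(w);
              return (0);
      }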
  
    This also introduces a new allocation tracing facility and two new dcmds
    to help debug allocation problems. Each zio now contains a
    zio_alloc_list_t structure that is populated as the zio goes through the
    allocation stage. Here's an
    example of how to use the tracing facility:
  
  > c5ec000::print zio_t io_alloc_list | ::walk list | ::metaslab_trace
    MSID    DVA    ASIZE      WEIGHT             RESULT               VDEV
       -      0      400           0    NOT_ALLOCATABLE           ztest.0a
       -      0      400           0    NOT_ALLOCATABLE           ztest.0a
       -      0      400           0             ENOSPC           ztest.0a
       -      0      200           0    NOT_ALLOCATABLE           ztest.0a
       -      0      200           0    NOT_ALLOCATABLE           ztest.0a
       -      0      200           0             ENOSPC           ztest.0a
       1      0      400      1 x 8M            17b1a00           ztest.0a
  
  > 1ff2400::print zio_t io_alloc_list | ::walk list | ::metaslab_trace
    MSID    DVA    ASIZE      WEIGHT             RESULT               VDEV
       -      0      200           0    NOT_ALLOCATABLE           mirror-2
       -      0      200           0    NOT_ALLOCATABLE           mirror-0
       1      0      200      1 x 4M            112ae00           mirror-1
       -      1      200           0    NOT_ALLOCATABLE           mirror-2
       -      1      200           0    NOT_ALLOCATABLE           mirror-0
       1      1      200      1 x 4M            112b000           mirror-1
       -      2      200           0    NOT_ALLOCATABLE           mirror-2
  
    If the metaslab is using segment-based weighting then the WEIGHT column will
    display the number of segments available in the bucket where the allocation
    attempt was made.
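
    As a rough guide to reading that column: the new metaslab_should_allocate()
    (in the metaslab.c hunk further down) treats a segment-based weight with
    index i as describing free segments in the range [2^i, 2^(i+1)), so an
    allocation of asize is attempted whenever asize < 2^(i+1). A minimal
    sketch of that cutoff; ex_should_try() is a hypothetical helper, not part
    of the change:

      #include <stdio.h>
      #include <stdint.h>

      static int
      ex_should_try(unsigned weight_index, uint64_t asize)
      {
              /* Segments in bucket i span [2^i, 2^(i+1)). */
              return (asize < (1ULL << (weight_index + 1)));
      }

      int
      main(void)
      {
              /* "1 x 8M" means index 23 (2^23 bytes = 8M). */
              printf("12M: %s\n", ex_should_try(23, 12ULL << 20) ? "try" : "skip");
              printf("16M: %s\n", ex_should_try(23, 16ULL << 20) ? "try" : "skip");
              return (0);
      }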
  
  Author: George Wilson <george.wil...@delphix.com>
  Reviewed by: Alex Reece <a...@delphix.com>
  Reviewed by: Chris Siden <christopher.si...@delphix.com>
  Reviewed by: Dan Kimmel <dan.kim...@delphix.com>
  Reviewed by: Matthew Ahrens <mahr...@delphix.com>
  Reviewed by: Paul Dagnelie <paul.dagne...@delphix.com>
  Reviewed by: Pavel Zakharov <pavel.zakha...@delphix.com>
  Reviewed by: Prakash Surya <prakash.su...@delphix.com>
  Reviewed by: Don Brady <don.br...@intel.com>
  Approved by: Richard Lowe <richl...@richlowe.net>

Modified:
  head/cddl/contrib/opensolaris/cmd/zdb/zdb.c
  head/cddl/contrib/opensolaris/cmd/ztest/ztest.c
  head/cddl/contrib/opensolaris/lib/libzpool/common/kernel.c
  head/sys/cddl/compat/opensolaris/kern/opensolaris_kstat.c
  head/sys/cddl/compat/opensolaris/sys/kstat.h
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/space_map.c
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/metaslab.h
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/metaslab_impl.h
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_debug.h
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zio.h
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c
Directory Properties:
  head/cddl/contrib/opensolaris/   (props changed)
  head/cddl/contrib/opensolaris/cmd/zdb/   (props changed)
  head/sys/cddl/contrib/opensolaris/   (props changed)

Modified: head/cddl/contrib/opensolaris/cmd/zdb/zdb.c
==============================================================================
--- head/cddl/contrib/opensolaris/cmd/zdb/zdb.c	Fri Mar 24 08:06:00 2017	(r315895)
+++ head/cddl/contrib/opensolaris/cmd/zdb/zdb.c	Fri Mar 24 09:37:00 2017	(r315896)
@@ -2589,10 +2589,21 @@ zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
 
        if (!dump_opt['L']) {
                vdev_t *rvd = spa->spa_root_vdev;
+
+               /*
+                * We are going to be changing the meaning of the metaslab's
+                * ms_tree.  Ensure that the allocator doesn't try to
+                * use the tree.
+                */
+               spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
+               spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
+
                for (uint64_t c = 0; c < rvd->vdev_children; c++) {
                        vdev_t *vd = rvd->vdev_child[c];
+                       metaslab_group_t *mg = vd->vdev_mg;
                        for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
                                metaslab_t *msp = vd->vdev_ms[m];
+                               ASSERT3P(msp->ms_group, ==, mg);
                                mutex_enter(&msp->ms_lock);
                                metaslab_unload(msp);
 
@@ -2613,8 +2624,6 @@ zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
                                            (longlong_t)m,
                                            (longlong_t)vd->vdev_ms_count);
 
-                                       msp->ms_ops = &zdb_metaslab_ops;
-
                                        /*
                                         * We don't want to spend the CPU
                                         * manipulating the size-ordered
@@ -2624,7 +2633,10 @@ zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
                                        msp->ms_tree->rt_ops = NULL;
                                        VERIFY0(space_map_load(msp->ms_sm,
                                            msp->ms_tree, SM_ALLOC));
-                                       msp->ms_loaded = B_TRUE;
+
+                                       if (!msp->ms_loaded) {
+                                               msp->ms_loaded = B_TRUE;
+                                       }
                                }
                                mutex_exit(&msp->ms_lock);
                        }
@@ -2646,8 +2658,10 @@ zdb_leak_fini(spa_t *spa)
                vdev_t *rvd = spa->spa_root_vdev;
                for (int c = 0; c < rvd->vdev_children; c++) {
                        vdev_t *vd = rvd->vdev_child[c];
+                       metaslab_group_t *mg = vd->vdev_mg;
                        for (int m = 0; m < vd->vdev_ms_count; m++) {
                                metaslab_t *msp = vd->vdev_ms[m];
+                               ASSERT3P(mg, ==, msp->ms_group);
                                mutex_enter(&msp->ms_lock);
 
                                /*
@@ -2661,7 +2675,10 @@ zdb_leak_fini(spa_t *spa)
                                 * from the ms_tree.
                                 */
                                range_tree_vacate(msp->ms_tree, zdb_leak, vd);
-                               msp->ms_loaded = B_FALSE;
+
+                               if (msp->ms_loaded) {
+                                       msp->ms_loaded = B_FALSE;
+                               }
 
                                mutex_exit(&msp->ms_lock);
                        }

Modified: head/cddl/contrib/opensolaris/cmd/ztest/ztest.c
==============================================================================
--- head/cddl/contrib/opensolaris/cmd/ztest/ztest.c	Fri Mar 24 08:06:00 2017	(r315895)
+++ head/cddl/contrib/opensolaris/cmd/ztest/ztest.c	Fri Mar 24 09:37:00 2017	(r315896)
@@ -173,7 +173,7 @@ static const ztest_shared_opts_t ztest_o
        .zo_mirrors = 2,
        .zo_raidz = 4,
        .zo_raidz_parity = 1,
-       .zo_vdev_size = SPA_MINDEVSIZE * 2,
+       .zo_vdev_size = SPA_MINDEVSIZE * 4,     /* 256m default size */
        .zo_datasets = 7,
        .zo_threads = 23,
        .zo_passtime = 60,              /* 60 seconds */

Modified: head/cddl/contrib/opensolaris/lib/libzpool/common/kernel.c
==============================================================================
--- head/cddl/contrib/opensolaris/lib/libzpool/common/kernel.c	Fri Mar 24 08:06:00 2017	(r315895)
+++ head/cddl/contrib/opensolaris/lib/libzpool/common/kernel.c	Fri Mar 24 09:37:00 2017	(r315896)
@@ -97,6 +97,11 @@ kstat_create(char *module, int instance,
 
 /*ARGSUSED*/
 void
+kstat_named_init(kstat_named_t *knp, const char *name, uchar_t type)
+{}
+
+/*ARGSUSED*/
+void
 kstat_install(kstat_t *ksp)
 {}
 

Modified: head/sys/cddl/compat/opensolaris/kern/opensolaris_kstat.c
==============================================================================
--- head/sys/cddl/compat/opensolaris/kern/opensolaris_kstat.c	Fri Mar 24 08:06:00 2017	(r315895)
+++ head/sys/cddl/compat/opensolaris/kern/opensolaris_kstat.c	Fri Mar 24 09:37:00 2017	(r315896)
@@ -129,3 +129,19 @@ kstat_delete(kstat_t *ksp)
        sysctl_ctx_free(&ksp->ks_sysctl_ctx);
        free(ksp, M_KSTAT);
 }
+
+void
+kstat_set_string(char *dst, const char *src)
+{
+
+       bzero(dst, KSTAT_STRLEN);
+       (void) strncpy(dst, src, KSTAT_STRLEN - 1);
+}
+
+void
+kstat_named_init(kstat_named_t *knp, const char *name, uchar_t data_type)
+{
+
+       kstat_set_string(knp->name, name);
+       knp->data_type = data_type;
+}

Modified: head/sys/cddl/compat/opensolaris/sys/kstat.h
==============================================================================
--- head/sys/cddl/compat/opensolaris/sys/kstat.h	Fri Mar 24 08:06:00 2017	(r315895)
+++ head/sys/cddl/compat/opensolaris/sys/kstat.h	Fri Mar 24 09:37:00 2017	(r315896)
@@ -69,5 +69,7 @@ kstat_t *kstat_create(char *module, int 
     uchar_t type, ulong_t ndata, uchar_t flags);
 void kstat_install(kstat_t *ksp);
 void kstat_delete(kstat_t *ksp);
+void kstat_set_string(char *, const char *);
+void kstat_named_init(kstat_named_t *, const char *, uchar_t);
 
 #endif /* _OPENSOLARIS_SYS_KSTAT_H_ */

Modified: head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c
==============================================================================
--- head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c	Fri Mar 24 08:06:00 2017	(r315895)
+++ head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c	Fri Mar 24 09:37:00 2017	(r315896)
@@ -41,11 +41,6 @@ SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab
 #define        GANG_ALLOCATION(flags) \
        ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
 
-#define        METASLAB_WEIGHT_PRIMARY         (1ULL << 63)
-#define        METASLAB_WEIGHT_SECONDARY       (1ULL << 62)
-#define        METASLAB_ACTIVE_MASK            \
-       (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
-
 uint64_t metaslab_aliquot = 512ULL << 10;
 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;    /* force gang blocks */
 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN,
@@ -55,7 +50,7 @@ SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO,
 /*
  * The in-core space map representation is more compact than its on-disk form.
  * The zfs_condense_pct determines how much more compact the in-core
- * space_map representation must be before we compact it on-disk.
+ * space map representation must be before we compact it on-disk.
  * Values should be greater than or equal to 100.
  */
 int zfs_condense_pct = 200;
@@ -153,7 +148,7 @@ SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO,
 /*
  * The minimum free space, in percent, which must be available
  * in a space map to continue allocations in a first-fit fashion.
- * Once the space_map's free space drops below this level we dynamically
+ * Once the space map's free space drops below this level we dynamically
  * switch to using best-fit allocations.
  */
 int metaslab_df_free_pct = 4;
@@ -230,7 +225,38 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, 
     &metaslab_bias_enabled, 0,
     "Enable metaslab group biasing");
 
-static uint64_t metaslab_fragmentation(metaslab_t *);
+/*
+ * Enable/disable segment-based metaslab selection.
+ */
+boolean_t zfs_metaslab_segment_weight_enabled = B_TRUE;
+
+/*
+ * When using segment-based metaslab selection, we will continue
+ * allocating from the active metaslab until we have exhausted
+ * zfs_metaslab_switch_threshold of its buckets.
+ */
+int zfs_metaslab_switch_threshold = 2;
+
+/*
+ * Internal switch to enable/disable the metaslab allocation tracing
+ * facility.
+ */
+boolean_t metaslab_trace_enabled = B_TRUE;
+
+/*
+ * Maximum entries that the metaslab allocation tracing facility will keep
+ * in a given list when running in non-debug mode. We limit the number
+ * of entries in non-debug mode to prevent us from using up too much memory.
+ * The limit should be sufficiently large that we don't expect any allocation
+ * to ever exceed this value. In debug mode, the system will panic if this
+ * limit is ever reached, allowing for further investigation.
+ */
+uint64_t metaslab_trace_max_entries = 5000;
+
+static uint64_t metaslab_weight(metaslab_t *);
+static void metaslab_set_fragmentation(metaslab_t *);
+
+kmem_cache_t *metaslab_alloc_trace_cache;
 
 /*
  * ==========================================================================
@@ -475,11 +501,6 @@ metaslab_class_expandable_space(metaslab
        return (space);
 }
 
-/*
- * ==========================================================================
- * Metaslab groups
- * ==========================================================================
- */
 static int
 metaslab_compare(const void *x1, const void *x2)
 {
@@ -505,6 +526,57 @@ metaslab_compare(const void *x1, const v
 }
 
 /*
+ * Verify that the space accounting on disk matches the in-core range_trees.
+ */
+void
+metaslab_verify_space(metaslab_t *msp, uint64_t txg)
+{
+       spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
+       uint64_t allocated = 0;
+       uint64_t freed = 0;
+       uint64_t sm_free_space, msp_free_space;
+
+       ASSERT(MUTEX_HELD(&msp->ms_lock));
+
+       if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
+               return;
+
+       /*
+        * We can only verify the metaslab space when we're called
+        * from syncing context with a loaded metaslab that has an allocated
+        * space map. Calling this in non-syncing context does not
+        * provide a consistent view of the metaslab since we're performing
+        * allocations in the future.
+        */
+       if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
+           !msp->ms_loaded)
+               return;
+
+       sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) -
+           space_map_alloc_delta(msp->ms_sm);
+
+       /*
+        * Account for future allocations since we would have already
+        * deducted that space from the ms_freetree.
+        */
+       for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
+               allocated +=
+                   range_tree_space(msp->ms_alloctree[(txg + t) & TXG_MASK]);
+       }
+       freed = range_tree_space(msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK]);
+
+       msp_free_space = range_tree_space(msp->ms_tree) + allocated +
+           msp->ms_deferspace + freed;
+
+       VERIFY3U(sm_free_space, ==, msp_free_space);
+}
+
+/*
+ * ==========================================================================
+ * Metaslab groups
+ * ==========================================================================
+ */
+/*
  * Update the allocatable flag and the metaslab group's capacity.
  * The allocatable flag is set to true if the capacity is below
  * the zfs_mg_noalloc_threshold or has a fragmentation value that is
@@ -1078,7 +1150,7 @@ static range_tree_ops_t metaslab_rt_ops 
 
 /*
  * ==========================================================================
- * Metaslab block operations
+ * Common allocator routines
  * ==========================================================================
  */
 
@@ -1097,31 +1169,22 @@ metaslab_block_maxsize(metaslab_t *msp)
        return (rs->rs_end - rs->rs_start);
 }
 
-uint64_t
-metaslab_block_alloc(metaslab_t *msp, uint64_t size)
+static range_seg_t *
+metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
 {
-       uint64_t start;
-       range_tree_t *rt = msp->ms_tree;
-
-       VERIFY(!msp->ms_condensing);
+       range_seg_t *rs, rsearch;
+       avl_index_t where;
 
-       start = msp->ms_ops->msop_alloc(msp, size);
-       if (start != -1ULL) {
-               vdev_t *vd = msp->ms_group->mg_vd;
+       rsearch.rs_start = start;
+       rsearch.rs_end = start + size;
 
-               VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
-               VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
-               VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
-               range_tree_remove(rt, start, size);
+       rs = avl_find(t, &rsearch, &where);
+       if (rs == NULL) {
+               rs = avl_nearest(t, where, AVL_AFTER);
        }
-       return (start);
-}
 
-/*
- * ==========================================================================
- * Common allocator routines
- * ==========================================================================
- */
+       return (rs);
+}
 
 /*
  * This is a helper function that can be used by the allocator to find
@@ -1132,15 +1195,7 @@ static uint64_t
 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
     uint64_t align)
 {
-       range_seg_t *rs, rsearch;
-       avl_index_t where;
-
-       rsearch.rs_start = *cursor;
-       rsearch.rs_end = *cursor + size;
-
-       rs = avl_find(t, &rsearch, &where);
-       if (rs == NULL)
-               rs = avl_nearest(t, where, AVL_AFTER);
+       range_seg_t *rs = metaslab_block_find(t, *cursor, size);
 
        while (rs != NULL) {
                uint64_t offset = P2ROUNDUP(rs->rs_start, align);
@@ -1365,6 +1420,7 @@ int
 metaslab_load(metaslab_t *msp)
 {
        int error = 0;
+       boolean_t success = B_FALSE;
 
        ASSERT(MUTEX_HELD(&msp->ms_lock));
        ASSERT(!msp->ms_loaded);
@@ -1382,14 +1438,18 @@ metaslab_load(metaslab_t *msp)
        else
                range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
 
-       msp->ms_loaded = (error == 0);
+       success = (error == 0);
        msp->ms_loading = B_FALSE;
 
-       if (msp->ms_loaded) {
+       if (success) {
+               ASSERT3P(msp->ms_group, !=, NULL);
+               msp->ms_loaded = B_TRUE;
+
                for (int t = 0; t < TXG_DEFER_SIZE; t++) {
                        range_tree_walk(msp->ms_defertree[t],
                            range_tree_remove, msp->ms_tree);
                }
+               msp->ms_max_size = metaslab_block_maxsize(msp);
        }
        cv_broadcast(&msp->ms_load_cv);
        return (error);
@@ -1402,6 +1462,7 @@ metaslab_unload(metaslab_t *msp)
        range_tree_vacate(msp->ms_tree, NULL, NULL);
        msp->ms_loaded = B_FALSE;
        msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
+       msp->ms_max_size = 0;
 }
 
 int
@@ -1446,21 +1507,23 @@ metaslab_init(metaslab_group_t *mg, uint
        ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
        metaslab_group_add(mg, ms);
 
-       ms->ms_fragmentation = metaslab_fragmentation(ms);
-       ms->ms_ops = mg->mg_class->mc_ops;
+       metaslab_set_fragmentation(ms);
 
        /*
         * If we're opening an existing pool (txg == 0) or creating
         * a new one (txg == TXG_INITIAL), all space is available now.
         * If we're adding space to an existing pool, the new space
         * does not become available until after this txg has synced.
+        * The metaslab's weight will also be initialized when we sync
+        * out this txg. This ensures that we don't attempt to allocate
+        * from it before we have initialized it completely.
         */
        if (txg <= TXG_INITIAL)
                metaslab_sync_done(ms, 0);
 
        /*
         * If metaslab_debug_load is set and we're initializing a metaslab
-        * that has an allocated space_map object then load the its space
+        * that has an allocated space map object then load its space
         * map so that can verify frees.
         */
        if (metaslab_debug_load && ms->ms_sm != NULL) {
@@ -1487,7 +1550,6 @@ metaslab_fini(metaslab_t *msp)
        metaslab_group_remove(mg, msp);
 
        mutex_enter(&msp->ms_lock);
-
        VERIFY(msp->ms_group == NULL);
        vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
            0, -msp->ms_size);
@@ -1560,8 +1622,8 @@ int zfs_frag_table[FRAGMENTATION_TABLE_S
  * not support this metric. Otherwise, the return value should be in the
  * range [0, 100].
  */
-static uint64_t
-metaslab_fragmentation(metaslab_t *msp)
+static void
+metaslab_set_fragmentation(metaslab_t *msp)
 {
        spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
        uint64_t fragmentation = 0;
@@ -1569,18 +1631,22 @@ metaslab_fragmentation(metaslab_t *msp)
        boolean_t feature_enabled = spa_feature_is_enabled(spa,
            SPA_FEATURE_SPACEMAP_HISTOGRAM);
 
-       if (!feature_enabled)
-               return (ZFS_FRAG_INVALID);
+       if (!feature_enabled) {
+               msp->ms_fragmentation = ZFS_FRAG_INVALID;
+               return;
+       }
 
        /*
         * A null space map means that the entire metaslab is free
         * and thus is not fragmented.
         */
-       if (msp->ms_sm == NULL)
-               return (0);
+       if (msp->ms_sm == NULL) {
+               msp->ms_fragmentation = 0;
+               return;
+       }
 
        /*
-        * If this metaslab's space_map has not been upgraded, flag it
+        * If this metaslab's space map has not been upgraded, flag it
         * so that we upgrade next time we encounter it.
         */
        if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
@@ -1593,12 +1659,14 @@ metaslab_fragmentation(metaslab_t *msp)
                        spa_dbgmsg(spa, "txg %llu, requesting force condense: "
                            "msp %p, vd %p", txg, msp, vd);
                }
-               return (ZFS_FRAG_INVALID);
+               msp->ms_fragmentation = ZFS_FRAG_INVALID;
+               return;
        }
 
        for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
                uint64_t space = 0;
                uint8_t shift = msp->ms_sm->sm_shift;
+
                int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
                    FRAGMENTATION_TABLE_SIZE - 1);
 
@@ -1615,7 +1683,8 @@ metaslab_fragmentation(metaslab_t *msp)
        if (total > 0)
                fragmentation /= total;
        ASSERT3U(fragmentation, <=, 100);
-       return (fragmentation);
+
+       msp->ms_fragmentation = fragmentation;
 }
 
 /*
@@ -1624,30 +1693,20 @@ metaslab_fragmentation(metaslab_t *msp)
  * the LBA range, and whether the metaslab is loaded.
  */
 static uint64_t
-metaslab_weight(metaslab_t *msp)
+metaslab_space_weight(metaslab_t *msp)
 {
        metaslab_group_t *mg = msp->ms_group;
        vdev_t *vd = mg->mg_vd;
        uint64_t weight, space;
 
        ASSERT(MUTEX_HELD(&msp->ms_lock));
-
-       /*
-        * This vdev is in the process of being removed so there is nothing
-        * for us to do here.
-        */
-       if (vd->vdev_removing) {
-               ASSERT0(space_map_allocated(msp->ms_sm));
-               ASSERT0(vd->vdev_ms_shift);
-               return (0);
-       }
+       ASSERT(!vd->vdev_removing);
 
        /*
         * The baseline weight is the metaslab's free space.
         */
        space = msp->ms_size - space_map_allocated(msp->ms_sm);
 
-       msp->ms_fragmentation = metaslab_fragmentation(msp);
        if (metaslab_fragmentation_factor_enabled &&
            msp->ms_fragmentation != ZFS_FRAG_INVALID) {
                /*
@@ -1696,6 +1755,210 @@ metaslab_weight(metaslab_t *msp)
                weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
        }
 
+       WEIGHT_SET_SPACEBASED(weight);
+       return (weight);
+}
+
+/*
+ * Return the weight of the specified metaslab, according to the segment-based
+ * weighting algorithm. The metaslab must be loaded. This function can
+ * be called within a sync pass since it relies only on the metaslab's
+ * range tree which is always accurate when the metaslab is loaded.
+ */
+static uint64_t
+metaslab_weight_from_range_tree(metaslab_t *msp)
+{
+       uint64_t weight = 0;
+       uint32_t segments = 0;
+
+       ASSERT(msp->ms_loaded);
+
+       for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
+           i--) {
+               uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
+               int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
+
+               segments <<= 1;
+               segments += msp->ms_tree->rt_histogram[i];
+
+               /*
+                * The range tree provides more precision than the space map
+                * and must be downgraded so that all values fit within the
+                * space map's histogram. This allows us to compare loaded
+                * vs. unloaded metaslabs to determine which metaslab is
+                * considered "best".
+                */
+               if (i > max_idx)
+                       continue;
+
+               if (segments != 0) {
+                       WEIGHT_SET_COUNT(weight, segments);
+                       WEIGHT_SET_INDEX(weight, i);
+                       WEIGHT_SET_ACTIVE(weight, 0);
+                       break;
+               }
+       }
+       return (weight);
+}
+
+/*
+ * Calculate the weight based on the on-disk histogram. This should only
+ * be called after a sync pass has completely finished since the on-disk
+ * information is updated in metaslab_sync().
+ */
+static uint64_t
+metaslab_weight_from_spacemap(metaslab_t *msp)
+{
+       uint64_t weight = 0;
+
+       for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
+               if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
+                       WEIGHT_SET_COUNT(weight,
+                           msp->ms_sm->sm_phys->smp_histogram[i]);
+                       WEIGHT_SET_INDEX(weight, i +
+                           msp->ms_sm->sm_shift);
+                       WEIGHT_SET_ACTIVE(weight, 0);
+                       break;
+               }
+       }
+       return (weight);
+}
+
+/*
+ * Compute a segment-based weight for the specified metaslab. The weight
+ * is determined by highest bucket in the histogram. The information
+ * for the highest bucket is encoded into the weight value.
+ */
+static uint64_t
+metaslab_segment_weight(metaslab_t *msp)
+{
+       metaslab_group_t *mg = msp->ms_group;
+       uint64_t weight = 0;
+       uint8_t shift = mg->mg_vd->vdev_ashift;
+
+       ASSERT(MUTEX_HELD(&msp->ms_lock));
+
+       /*
+        * The metaslab is completely free.
+        */
+       if (space_map_allocated(msp->ms_sm) == 0) {
+               int idx = highbit64(msp->ms_size) - 1;
+               int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
+
+               if (idx < max_idx) {
+                       WEIGHT_SET_COUNT(weight, 1ULL);
+                       WEIGHT_SET_INDEX(weight, idx);
+               } else {
+                       WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
+                       WEIGHT_SET_INDEX(weight, max_idx);
+               }
+               WEIGHT_SET_ACTIVE(weight, 0);
+               ASSERT(!WEIGHT_IS_SPACEBASED(weight));
+
+               return (weight);
+       }
+
+       ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
+
+       /*
+        * If the metaslab is fully allocated then just make the weight 0.
+        */
+       if (space_map_allocated(msp->ms_sm) == msp->ms_size)
+               return (0);
+       /*
+        * If the metaslab is already loaded, then use the range tree to
+        * determine the weight. Otherwise, we rely on the space map information
+        * to generate the weight.
+        */
+       if (msp->ms_loaded) {
+               weight = metaslab_weight_from_range_tree(msp);
+       } else {
+               weight = metaslab_weight_from_spacemap(msp);
+       }
+
+       /*
+        * If the metaslab was active the last time we calculated its weight
+        * then keep it active. We want to consume the entire region that
+        * is associated with this weight.
+        */
+       if (msp->ms_activation_weight != 0 && weight != 0)
+               WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
+       return (weight);
+}
+
+/*
+ * Determine if we should attempt to allocate from this metaslab. If the
+ * metaslab has a maximum size then we can quickly determine if the desired
+ * allocation size can be satisfied. Otherwise, if we're using segment-based
+ * weighting then we can determine the maximum allocation that this metaslab
+ * can accommodate based on the index encoded in the weight. If we're using
+ * space-based weights then rely on the entire weight (excluding the weight
+ * type bit).
+ */
+boolean_t
+metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
+{
+       boolean_t should_allocate;
+
+       if (msp->ms_max_size != 0)
+               return (msp->ms_max_size >= asize);
+
+       if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
+               /*
+                * The metaslab segment weight indicates segments in the
+                * range [2^i, 2^(i+1)), where i is the index in the weight.
+                * Since the asize might be in the middle of the range, we
+                * should attempt the allocation if asize < 2^(i+1).
+                */
+               should_allocate = (asize <
+                   1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
+       } else {
+               should_allocate = (asize <=
+                   (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
+       }
+       return (should_allocate);
+}
+
+static uint64_t
+metaslab_weight(metaslab_t *msp)
+{
+       vdev_t *vd = msp->ms_group->mg_vd;
+       spa_t *spa = vd->vdev_spa;
+       uint64_t weight;
+
+       ASSERT(MUTEX_HELD(&msp->ms_lock));
+
+       /*
+        * This vdev is in the process of being removed so there is nothing
+        * for us to do here.
+        */
+       if (vd->vdev_removing) {
+               ASSERT0(space_map_allocated(msp->ms_sm));
+               ASSERT0(vd->vdev_ms_shift);
+               return (0);
+       }
+
+       metaslab_set_fragmentation(msp);
+
+       /*
+        * Update the maximum size if the metaslab is loaded. This will
+        * ensure that we get an accurate maximum size if newly freed space
+        * has been added back into the free tree.
+        */
+       if (msp->ms_loaded)
+               msp->ms_max_size = metaslab_block_maxsize(msp);
+
+       /*
+        * Segment-based weighting requires space map histogram support.
+        */
+       if (zfs_metaslab_segment_weight_enabled &&
+           spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
+           (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
+           sizeof (space_map_phys_t))) {
+               weight = metaslab_segment_weight(msp);
+       } else {
+               weight = metaslab_space_weight(msp);
+       }
        return (weight);
 }
 
@@ -1714,6 +1977,7 @@ metaslab_activate(metaslab_t *msp, uint6
                        }
                }
 
+               msp->ms_activation_weight = msp->ms_weight;
                metaslab_group_sort(msp->ms_group, msp,
                    msp->ms_weight | activation_weight);
        }
@@ -1724,18 +1988,56 @@ metaslab_activate(metaslab_t *msp, uint6
 }
 
 static void
-metaslab_passivate(metaslab_t *msp, uint64_t size)
+metaslab_passivate(metaslab_t *msp, uint64_t weight)
 {
+       uint64_t size = weight & ~METASLAB_WEIGHT_TYPE;
+
        /*
         * If size < SPA_MINBLOCKSIZE, then we will not allocate from
         * this metaslab again.  In that case, it had better be empty,
         * or we would be leaving space on the table.
         */
-       ASSERT(size >= SPA_MINBLOCKSIZE || range_tree_space(msp->ms_tree) == 0);
-       metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
+       ASSERT(size >= SPA_MINBLOCKSIZE ||
+           range_tree_space(msp->ms_tree) == 0);
+       ASSERT0(weight & METASLAB_ACTIVE_MASK);
+
+       msp->ms_activation_weight = 0;
+       metaslab_group_sort(msp->ms_group, msp, weight);
        ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
 }
 
+/*
+ * Segment-based metaslabs are activated once and remain active until
+ * we either fail an allocation attempt (similar to space-based metaslabs)
+ * or have exhausted the free space in zfs_metaslab_switch_threshold
+ * buckets since the metaslab was activated. This function checks to see
+ * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
+ * metaslab and passivates it proactively. This will allow us to select a
+ * metaslab with a larger contiguous region, if any remain within this
+ * metaslab group. If we're in sync pass > 1, then we continue using this
+ * metaslab so that we don't dirty more blocks and cause more sync passes.
+ */
+void
+metaslab_segment_may_passivate(metaslab_t *msp)
+{
+       spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
+
+       if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
+               return;
+
+       /*
+        * Since we are in the middle of a sync pass, the most accurate
+        * information that is accessible to us is the in-core range tree
+        * histogram; calculate the new weight based on that information.
+        */
+       uint64_t weight = metaslab_weight_from_range_tree(msp);
+       int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
+       int current_idx = WEIGHT_GET_INDEX(weight);
+
+       if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
+               metaslab_passivate(msp, weight);
+}
+
 static void
 metaslab_preload(void *arg)
 {
@@ -1748,11 +2050,7 @@ metaslab_preload(void *arg)
        metaslab_load_wait(msp);
        if (!msp->ms_loaded)
                (void) metaslab_load(msp);
-
-       /*
-        * Set the ms_access_txg value so that we don't unload it right away.
-        */
-       msp->ms_access_txg = spa_syncing_txg(spa) + metaslab_unload_delay + 1;
+       msp->ms_selected_txg = spa_syncing_txg(spa);
        mutex_exit(&msp->ms_lock);
 }
 
@@ -1773,10 +2071,7 @@ metaslab_group_preload(metaslab_group_t 
        /*
         * Load the next potential metaslabs
         */
-       msp = avl_first(t);
-       while (msp != NULL) {
-               metaslab_t *msp_next = AVL_NEXT(t, msp);
-
+       for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
                /*
                 * We preload only the maximum number of metaslabs specified
                 * by metaslab_preload_limit. If a metaslab is being forced
@@ -1784,27 +2079,11 @@ metaslab_group_preload(metaslab_group_t 
                 * that force condensing happens in the next txg.
                 */
                if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
-                       msp = msp_next;
                        continue;
                }
 
-               /*
-                * We must drop the metaslab group lock here to preserve
-                * lock ordering with the ms_lock (when grabbing both
-                * the mg_lock and the ms_lock, the ms_lock must be taken
-                * first).  As a result, it is possible that the ordering
-                * of the metaslabs within the avl tree may change before
-                * we reacquire the lock. The metaslab cannot be removed from
-                * the tree while we're in syncing context so it is safe to
-                * drop the mg_lock here. If the metaslabs are reordered
-                * nothing will break -- we just may end up loading a
-                * less than optimal one.
-                */
-               mutex_exit(&mg->mg_lock);
                VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
                    msp, TQ_SLEEP) != 0);
-               mutex_enter(&mg->mg_lock);
-               msp = msp_next;
        }
        mutex_exit(&mg->mg_lock);
 }
@@ -1953,7 +2232,7 @@ metaslab_condense(metaslab_t *msp, uint6
        mutex_enter(&msp->ms_lock);
 
        /*
-        * While we would ideally like to create a space_map representation
+        * While we would ideally like to create a space map representation
         * that consists only of allocation records, doing so can be
         * prohibitively expensive because the in-core free tree can be
         * large, and therefore computationally expensive to subtract
@@ -2016,7 +2295,7 @@ metaslab_sync(metaslab_t *msp, uint64_t 
         * metaslab_sync() is the metaslab's ms_tree.  No other thread can
         * be modifying this txg's alloctree, freetree, freed_tree, or
         * space_map_phys_t. Therefore, we only hold ms_lock to satify
-        * space_map ASSERTs. We drop it whenever we call into the DMU,
+        * space map ASSERTs. We drop it whenever we call into the DMU,
         * because the DMU can call down to us (e.g. via zio_free()) at
         * any time.
         */
@@ -2038,7 +2317,7 @@ metaslab_sync(metaslab_t *msp, uint64_t 
        mutex_enter(&msp->ms_lock);
 
        /*
-        * Note: metaslab_condense() clears the space_map's histogram.
+        * Note: metaslab_condense() clears the space map's histogram.
         * Therefore we must verify and remove this histogram before
         * condensing.
         */
@@ -2063,16 +2342,38 @@ metaslab_sync(metaslab_t *msp, uint64_t 
                 */
                space_map_histogram_clear(msp->ms_sm);
                space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
-       } else {
+
+               /*
+                * Since we've cleared the histogram we need to add back
+                * any free space that has already been processed, plus
+                * any deferred space. This allows the on-disk histogram
+                * to accurately reflect all free space even if some space
+                * is not yet available for allocation (i.e. deferred).
+                */
+               space_map_histogram_add(msp->ms_sm, *freed_tree, tx);
+
                /*
-                * Since the space map is not loaded we simply update the
-                * exisiting histogram with what was freed in this txg. This
-                * means that the on-disk histogram may not have an accurate
-                * view of the free space but it's close enough to allow
-                * us to make allocation decisions.
+                * Add back any deferred free space that has not been
+                * added back into the in-core free tree yet. This will
+                * ensure that we don't end up with a space map histogram
+                * that is completely empty unless the metaslab is fully
+                * allocated.
                 */
-               space_map_histogram_add(msp->ms_sm, *freetree, tx);
+               for (int t = 0; t < TXG_DEFER_SIZE; t++) {
+                       space_map_histogram_add(msp->ms_sm,
+                           msp->ms_defertree[t], tx);
+               }
        }
+
+       /*
+        * Always add the free space from this sync pass to the space
+        * map histogram. We want to make sure that the on-disk histogram
+        * accounts for all free space. If the space map is not loaded,
+        * then we will lose some accuracy but will correct it the next
+        * time we load the space map.
+        */
+       space_map_histogram_add(msp->ms_sm, *freetree, tx);
+
        metaslab_group_histogram_add(mg, msp);
        metaslab_group_histogram_verify(mg);
        metaslab_class_histogram_verify(mg->mg_class);
@@ -2091,6 +2392,7 @@ metaslab_sync(metaslab_t *msp, uint64_t 
        range_tree_vacate(alloctree, NULL, NULL);
 
        ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
+       ASSERT0(range_tree_space(msp->ms_alloctree[TXG_CLEAN(txg) & TXG_MASK]));
        ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
 
        mutex_exit(&msp->ms_lock);
@@ -2112,9 +2414,11 @@ metaslab_sync_done(metaslab_t *msp, uint
 {
        metaslab_group_t *mg = msp->ms_group;
        vdev_t *vd = mg->mg_vd;
+       spa_t *spa = vd->vdev_spa;
        range_tree_t **freed_tree;
        range_tree_t **defer_tree;
        int64_t alloc_delta, defer_delta;
+       boolean_t defer_allowed = B_TRUE;
 
        ASSERT(!vd->vdev_ishole);
 
@@ -2149,9 +2453,20 @@ metaslab_sync_done(metaslab_t *msp, uint
        freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
        defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
 
+       uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
+           metaslab_class_get_alloc(spa_normal_class(spa));
+       if (free_space <= spa_get_slop_space(spa)) {
+               defer_allowed = B_FALSE;
+       }
+
+       defer_delta = 0;
        alloc_delta = space_map_alloc_delta(msp->ms_sm);
-       defer_delta = range_tree_space(*freed_tree) -
-           range_tree_space(*defer_tree);
+       if (defer_allowed) {
+               defer_delta = range_tree_space(*freed_tree) -
+                   range_tree_space(*defer_tree);
+       } else {
+               defer_delta -= range_tree_space(*defer_tree);
+       }
 
        vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
 
@@ -2172,7 +2487,12 @@ metaslab_sync_done(metaslab_t *msp, uint
         */
        range_tree_vacate(*defer_tree,
            msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
-       range_tree_swap(freed_tree, defer_tree);
+       if (defer_allowed) {
+               range_tree_swap(freed_tree, defer_tree);
+       } else {
+               range_tree_vacate(*freed_tree,
+                   msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
+       }
 
        space_map_update(msp->ms_sm);
 
@@ -2187,7 +2507,18 @@ metaslab_sync_done(metaslab_t *msp, uint
                vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
        }
 
-       if (msp->ms_loaded && msp->ms_access_txg < txg) {
+       /*
+        * Calculate the new weights before unloading any metaslabs.
+        * This will give us the most accurate weighting.
+        */
+       metaslab_group_sort(mg, msp, metaslab_weight(msp));
+
+       /*
+        * If the metaslab is loaded and we've not tried to load or allocate
+        * from it in 'metaslab_unload_delay' txgs, then unload it.
+        */
+       if (msp->ms_loaded &&
+           msp->ms_selected_txg + metaslab_unload_delay < txg) {
                for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
                        VERIFY0(range_tree_space(
                            msp->ms_alloctree[(txg + t) & TXG_MASK]));
@@ -2197,7 +2528,6 @@ metaslab_sync_done(metaslab_t *msp, uint
                        metaslab_unload(msp);
        }
 
-       metaslab_group_sort(mg, msp, metaslab_weight(msp));
        mutex_exit(&msp->ms_lock);
 }
 
@@ -2232,6 +2562,113 @@ metaslab_distance(metaslab_t *msp, dva_t
 
 /*
  * ==========================================================================

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***