LGTM,

Acked-by: Jarno Rajahalme <jrajaha...@nicira.com>

Thanks,

  Jarno


> On Nov 20, 2014, at 4:37 PM, Alex Wang <al...@nicira.com> wrote:
> 
> On current master, the exact match cache entry can keep reference to
> 'struct dp_netdev_flow' even after the flow is removed from the flow
> table.  This means the free of allocated memory of the flow is delayed
> until the exact match cache entry is cleared or replaced.
> 
> If the allocated memory is ahead of chunks of freed memory on the heap,
> the delay will prevent the reclaim of those freed chunks, causing
> falsely high memory utilization.
> 
> To fix the issue, this commit makes the owning thread conduct periodic
> garbage collection on the exact match cache and clear dead entries.
> 
> Signed-off-by: Alex Wang <al...@nicira.com>
> 
> ---
> PATCH -> V2:
> - Adopt Jarno's suggestion and conduct a slow sweep to avoid introducing
>  jitter.
> ---
> lib/dpif-netdev.c |   17 +++++++++++++++++
> 1 file changed, 17 insertions(+)
> 
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index 65df19b..265f497 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -129,6 +129,7 @@ struct emc_entry {
> 
> struct emc_cache {
>     struct emc_entry entries[EM_FLOW_HASH_ENTRIES];
> +    int sweep_idx;                /* For emc_cache_slow_sweep(). */
> };
> 
> /* Iterate in the exact match cache through every entry that might contain a
> @@ -433,6 +434,7 @@ static void dp_netdev_del_pmds_on_numa(struct dp_netdev 
> *dp, int numa_id);
> static void dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id);
> static void dp_netdev_reset_pmd_threads(struct dp_netdev *dp);
> 
> +static inline bool emc_entry_alive(struct emc_entry *ce);
> static void emc_clear_entry(struct emc_entry *ce);
> 
> static void
> @@ -442,6 +444,7 @@ emc_cache_init(struct emc_cache *flow_cache)
> 
>     BUILD_ASSERT(offsetof(struct miniflow, inline_values) == 
> sizeof(uint64_t));
> 
> +    flow_cache->sweep_idx = 0;
>     for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
>         flow_cache->entries[i].flow = NULL;
>         flow_cache->entries[i].key.hash = 0;
> @@ -462,6 +465,19 @@ emc_cache_uninit(struct emc_cache *flow_cache)
>     }
> }
> 
> +/* Check and clear dead flow references slowly (one entry at each
> + * invocation).  */
> +static void
> +emc_cache_slow_sweep(struct emc_cache *flow_cache)
> +{
> +    struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx];
> +
> +    if (!emc_entry_alive(entry)) {
> +        emc_clear_entry(entry);
> +    }
> +    flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
> +}
> +
> static struct dpif_netdev *
> dpif_netdev_cast(const struct dpif *dpif)
> {
> @@ -2332,6 +2348,7 @@ reload:
> 
>             lc = 0;
> 
> +            emc_cache_slow_sweep(&pmd->flow_cache);
>             ovsrcu_quiesce();
> 
>             atomic_read_relaxed(&pmd->change_seq, &seq);
> -- 
> 1.7.9.5
> 
_______________________________________________
dev mailing list
dev@openvswitch.org
http://openvswitch.org/mailman/listinfo/dev

Reply via email to