The refcount_t type and its corresponding API should be used instead of
atomic_t when the variable is used as a reference counter. This prevents
accidental refcount overflows that might lead to use-after-free
situations.

Convert the count field of struct sidtab from atomic_t to refcount_t
accordingly.
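
As a minimal sketch of the pattern the refcount_t API is designed for
(illustration only, not part of this patch; struct foo and its helpers
are hypothetical), a get/put pair built on refcount_t saturates and
WARNs on overflow instead of silently wrapping the counter:

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
        refcount_t ref;
        /* ... payload ... */
};

static struct foo *foo_alloc(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (f)
                refcount_set(&f->ref, 1);       /* initial reference */
        return f;
}

static struct foo *foo_get(struct foo *f)
{
        /* Saturates and WARNs on overflow instead of wrapping to zero. */
        refcount_inc(&f->ref);
        return f;
}

static void foo_put(struct foo *f)
{
        /* Free only on the genuine 1 -> 0 transition. */
        if (refcount_dec_and_test(&f->ref))
                kfree(f);
}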

Signed-off-by: NitinGote <nitin.r.g...@intel.com>
---
 security/selinux/ss/sidtab.c | 16 ++++++++--------
 security/selinux/ss/sidtab.h |  2 +-
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index e63a90ff2728..20fe235c6c71 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -29,7 +29,7 @@ int sidtab_init(struct sidtab *s)
        for (i = 0; i < SECINITSID_NUM; i++)
                s->isids[i].set = 0;

-       atomic_set(&s->count, 0);
+       refcount_set(&s->count, 0);

        s->convert = NULL;

@@ -130,7 +130,7 @@ static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)

 static struct context *sidtab_lookup(struct sidtab *s, u32 index)
 {
-       u32 count = (u32)atomic_read(&s->count);
+       u32 count = (u32)refcount_read(&s->count);

        if (index >= count)
                return NULL;
@@ -245,7 +245,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
                                 u32 *index)
 {
        unsigned long flags;
-       u32 count = (u32)atomic_read(&s->count);
+       u32 count = (u32)refcount_read(&s->count);
        u32 count_locked, level, pos;
        struct sidtab_convert_params *convert;
        struct context *dst, *dst_convert;
@@ -272,7 +272,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
        spin_lock_irqsave(&s->lock, flags);

        convert = s->convert;
-       count_locked = (u32)atomic_read(&s->count);
+       count_locked = (u32)refcount_read(&s->count);
        level = sidtab_level_from_count(count_locked);

        /* if count has changed before we acquired the lock, then catch up */
@@ -315,7 +315,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
                }

                /* at this point we know the insert won't fail */
-               atomic_set(&convert->target->count, count + 1);
+               refcount_set(&convert->target->count, count + 1);
        }

        if (context->len)
@@ -328,7 +328,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
        /* write entries before writing new count */
        smp_wmb();

-       atomic_set(&s->count, count + 1);
+       refcount_set(&s->count, count + 1);

        rc = 0;
 out_unlock:
@@ -418,7 +418,7 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
                return -EBUSY;
        }

-       count = (u32)atomic_read(&s->count);
+       count = (u32)refcount_read(&s->count);
        level = sidtab_level_from_count(count);

        /* allocate last leaf in the new sidtab (to avoid race with
@@ -431,7 +431,7 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
        }

        /* set count in case no new entries are added during conversion */
-       atomic_set(&params->target->count, count);
+       refcount_set(&params->target->count, count);

        /* enable live convert of new entries */
        s->convert = params;
diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h
index bbd5c0d1f3bd..68dd96a5beba 100644
--- a/security/selinux/ss/sidtab.h
+++ b/security/selinux/ss/sidtab.h
@@ -70,7 +70,7 @@ struct sidtab_convert_params {

 struct sidtab {
        union sidtab_entry_inner roots[SIDTAB_MAX_LEVEL + 1];
-       atomic_t count;
+       refcount_t count;
        struct sidtab_convert_params *convert;
        spinlock_t lock;

--
2.17.1
