Re: [PATCH 2/3] binfmt_elf: Verify signature of signed elf binary

2013-01-17 Thread Elena Reshetova
>> > Ok, that's the point I am missing. So I can sign a file and signatures
>> > are in a separate file. And these signatures are installed in extended
>> > attributes at file installation time (IOW rpm installation time) on
>> > target.
>> >
>> > If all this works, this sounds reasonable so far. Except the point of
>> > disabling ptrace and locking down memory.
>> >
>> > So what's the state of above work. Is there something I can play with.

Let me try to comment on this one a bit.
The whole idea behind extending the rpm plugin interface was to have an
extensive set of hooks that would allow rpm plugins to perform the needed
additional work. Plugins can differ depending on a distribution's needs,
and if a distribution needs to bootstrap IMA signatures, this can also be
done in one of the plugin hooks.

Now about the hook status: so far we have integrated only a subset of the
hooks into the rpm master branch, mainly because, unfortunately, I am far
from working on it all the time. Currently I am looking at the filesystem
hooks and hoping to send some version of that patch soon.

Once the hooks are integrated, it is really up to the plugin implementor
to design how things will happen. There will be a hook that is called
after a file from a package is placed on the filesystem, where a plugin
can do many things, like setting MAC labels or setting IMA signatures on
the file. How the signatures are stored in a package is also currently
open; there are a number of options here. You can define a special rpm
header TAG and, during package build, embed all the information about the
signatures there together with the file names. The plugin can then parse
the header tag info, get all the signature info and, when the right hook
is called, set up the IMA signature attribute. But as I said, this is
just one way of doing it and may not be the best one.
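
To make that last option concrete, here is a minimal sketch of what such
a plugin hook could do once it has a signature blob from the package
metadata. The hook name and its arguments are hypothetical (the real rpm
plugin interface defines its own); only lsetxattr() and the security.ima
attribute name are real:

#include <sys/xattr.h>

/* Hypothetical post-install hook: write the IMA signature shipped in the
 * package metadata into the installed file's security.ima xattr, so that
 * IMA-appraisal can verify the file later. */
static int example_file_installed_hook(const char *path,
                                       const void *sig, size_t siglen)
{
        if (lsetxattr(path, "security.ima", sig, siglen, 0) < 0)
                return -1;      /* e.g. filesystem without xattr support */
        return 0;
}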

Best Regards,
Elena.


[RFC PATCH] x86/entry/64: randomize kernel stack offset upon syscall

2019-03-18 Thread Elena Reshetova
...the randomization is done upon
syscall entry and not the exit, as with RANDKSTACK.

Also, as a result of the above two differences, the implementation
of RANDKSTACK and RANDOMIZE_KSTACK_OFFSET has nothing in common.

[4] https://www.openwall.com/lists/kernel-hardening/2019/02/08/6

Signed-off-by: Elena Reshetova 
---
 arch/Kconfig   | 15 +++
 arch/x86/Kconfig   |  1 +
 arch/x86/entry/calling.h   | 14 ++
 arch/x86/entry/entry_64.S  |  6 ++
 arch/x86/include/asm/frame.h   |  3 +++
 arch/x86/kernel/dumpstack.c| 10 +-
 arch/x86/kernel/unwind_frame.c |  9 -
 7 files changed, 56 insertions(+), 2 deletions(-)
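
As a rough C model of the offset that the RANDOMIZE_KSTACK macro below
computes (illustrative only; the real implementation is the asm in
calling.h): the TSC or RDRAND value is masked with
__MAX_STACK_RANDOM_OFFSET (0xFF0), giving a 16-byte-aligned offset in the
range 0..4080 bytes, which the entry code then subtracts from %rsp before
calling do_syscall_64().

/* illustrative model, not part of the patch */
#define __MAX_STACK_RANDOM_OFFSET 0xFF0 /* defined in asm/frame.h below */

static inline unsigned long kstack_random_offset(unsigned long entropy)
{
        /* keep bits 4..11: a multiple of 16 in [0, 4080] */
        return entropy & __MAX_STACK_RANDOM_OFFSET;
}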

diff --git a/arch/Kconfig b/arch/Kconfig
index 4cfb6de48f79..9a2557b0cfce 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -808,6 +808,21 @@ config VMAP_STACK
  the stack to map directly to the KASAN shadow map using a formula
  that is incorrect if the stack is in vmalloc space.
 
+config HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+   def_bool n
+   help
+ An arch should select this symbol if it can support kernel stack
+ offset randomization.
+
+config RANDOMIZE_KSTACK_OFFSET
+   default n
+   bool "Randomize kernel stack offset on syscall entry"
+   depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+   help
+ Enable this if you want to randomize the kernel stack offset upon
+ each syscall entry. This causes the kernel stack (after pt_regs) to
+ have a randomized offset upon executing each system call.
+
 config ARCH_OPTIONAL_KERNEL_RWX
def_bool n
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ade12ec4224b..5edcae945b73 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -131,6 +131,7 @@ config X86
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
select HAVE_ARCH_VMAP_STACK if X86_64
+   select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET  if X86_64
select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index efb0d1b1f15f..68502645d812 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -345,6 +345,20 @@ For 32-bit we have the following conventions - kernel is 
built with
 #endif
 .endm
 
+.macro RANDOMIZE_KSTACK
+#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+   /* prepare a random offset in rax */
+   pushq %rax
+   xorq  %rax, %rax
+   ALTERNATIVE "rdtsc", "rdrand %rax", X86_FEATURE_RDRAND
+   andq  $__MAX_STACK_RANDOM_OFFSET, %rax
+
+   /* store offset in r15 */
+   movq  %rax, %r15
+   popq  %rax
+#endif
+.endm
+
 /*
  * This does 'call enter_from_user_mode' unless we can avoid it based on
  * kernel config or using the static jump infrastructure.
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 1f0efdb7b629..0816ec680c21 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -167,13 +167,19 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
 
PUSH_AND_CLEAR_REGS rax=$-ENOSYS
 
+   RANDOMIZE_KSTACK/* stores randomized offset in r15 */
+
TRACE_IRQS_OFF
 
/* IRQs are off. */
movq%rax, %rdi
movq%rsp, %rsi
+   sub %r15, %rsp  /* subtract random offset from rsp */
calldo_syscall_64   /* returns with IRQs disabled */
 
+   /* need to restore the gap */
+   add %r15, %rsp   /* add random offset back to rsp */
+
TRACE_IRQS_IRETQ/* we're about to change IF */
 
/*
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index 5cbce6fbb534..e1bb91504f6e 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -4,6 +4,9 @@
 
 #include 
 
+#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+#define __MAX_STACK_RANDOM_OFFSET 0xFF0
+#endif
 /*
  * These are stack frame creation macros.  They should be used by every
  * callable non-leaf asm function to make kernel stack traces more reliable.
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 2b5886401e5f..4146a4c3e9c6 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -192,7 +192,6 @@ void show_trace_log_lvl(struct task_struct *task, struct 
pt_regs *regs,
 */
for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
const char *stack_name;
-
if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
/*
 * We weren't on a valid stack.  It's possible that
@@ -224,6 +223,9 @@ void show_trace_log_lvl(struct task_struct *task, struct 
pt_regs *regs,
 */
for (; stack < stack_info.end; stack++) {
  

[ANNOUNCE][CFP] Linux Security Summit Europe 2020

2020-05-07 Thread Elena Reshetova
==
   ANNOUNCEMENT AND CALL FOR PARTICIPATION

LINUX SECURITY SUMMIT EUROPE 2020

 29-30 OCTOBER
DUBLIN, IRELAND
==

DESCRIPTION

Linux Security Summit Europe (LSS-EU) is a technical forum for
collaboration between Linux developers, researchers, and end-users.  Its
primary aim is to foster community efforts in analyzing and solving Linux
security challenges.

 The program committee currently seeks proposals for:

   * Refereed Presentations:
 45 minutes in length.

   * Panel Discussion Topics:
 45 minutes in length.

   * Short Topics:
 30 minutes in total, including at least 10 minutes discussion.

   * Tutorials
 90 minutes in length.

Tutorial sessions should be focused on advanced Linux security defense
topics within areas such as the kernel, compiler, and security-related
libraries.  Priority will be given to tutorials created for this conference,
and those where the presenter is a leading subject matter expert on the topic.

Topic areas include, but are not limited to:

   * Kernel self-protection
   * Access control
   * Cryptography and key management
   * Integrity policy and enforcement
   * Hardware Security
   * IoT and embedded security
   * Virtualization and containers
   * System-specific system hardening
   * Case studies
   * Security tools
   * Security UX
   * Emerging technologies, threats & techniques

  Proposals should be submitted via:

   https://events.linuxfoundation.org/linux-security-summit-europe/program/cfp/

DATES

  * CFP close:July 31
  * CFP notifications:August 10
  * Schedule announced:   September 1
  * Event:October 29-30

COVID-19 SITUATION

Currently LSS-EU is planned as an in-person event; however, this will be
re-evaluated closer to the event itself, and if the situation in Europe does
not permit such events, it will be switched to a virtual event, similar to
this year’s LSS-NA.

WHO SHOULD ATTEND

We're seeking a diverse range of attendees and welcome participation by
people involved in Linux security development, operations, and research.

LSS-EU is a unique global event that provides the opportunity to present and
discuss your work or research with key Linux security community members and
maintainers.  It’s also useful for those who wish to keep up with the latest
in Linux security development and to provide input to the development
process.

WEB SITE

https://events.linuxfoundation.org/linux-security-summit-europe/

TWITTER

  For event updates and announcements, follow:

https://twitter.com/LinuxSecSummit

#linuxsecuritysummit

PROGRAM COMMITTEE

  The program committee for LSS 2020 is:

* James Morris, Microsoft
* Serge Hallyn, Cisco
* Paul Moore, Cisco
* Stephen Smalley, NSA
    * Elena Reshetova, Intel
* John Johansen, Canonical
* Kees Cook, Google
* Casey Schaufler, Intel
* Mimi Zohar, IBM
* David A. Wheeler, Institute for Defense Analyses

  The program committee may be contacted as a group via email:
lss-pc () lists.linuxfoundation.org


[PATCH 0/1] v2, randomize stack offset upon syscall

2019-04-10 Thread Elena Reshetova
Resending the patch since the first attempt never made
it to lkml.

changes in v2:
 - alloca() is changed to __builtin_alloca() in order
   to be compatible with 32-bit versions

Elena Reshetova (1):
  x86/entry/64: randomize kernel stack offset upon syscall

 arch/Kconfig| 15 +++
 arch/x86/Kconfig|  1 +
 arch/x86/entry/common.c | 13 +
 3 files changed, 29 insertions(+)

-- 
2.17.1



[PATCH 1/1] x86/entry/64: randomize kernel stack offset upon syscall

2019-04-10 Thread Elena Reshetova
...the randomization is done upon syscall entry and not the exit, as with RANDKSTACK.

Also, as a result of the above two differences, the implementation
of RANDKSTACK and RANDOMIZE_KSTACK_OFFSET has nothing in common.

[4] https://www.openwall.com/lists/kernel-hardening/2019/02/08/6

Signed-off-by: Elena Reshetova 
---
 arch/Kconfig| 15 +++
 arch/x86/Kconfig|  1 +
 arch/x86/entry/common.c | 13 +
 3 files changed, 29 insertions(+)

diff --git a/arch/Kconfig b/arch/Kconfig
index 4cfb6de48f79..9a2557b0cfce 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -808,6 +808,21 @@ config VMAP_STACK
  the stack to map directly to the KASAN shadow map using a formula
  that is incorrect if the stack is in vmalloc space.
 
+config HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+   def_bool n
+   help
+ An arch should select this symbol if it can support kernel stack
+ offset randomization.
+
+config RANDOMIZE_KSTACK_OFFSET
+   default n
+   bool "Randomize kernel stack offset on syscall entry"
+   depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+   help
+ Enable this if you want to randomize the kernel stack offset upon
+ each syscall entry. This causes the kernel stack (after pt_regs) to
+ have a randomized offset upon executing each system call.
+
 config ARCH_OPTIONAL_KERNEL_RWX
def_bool n
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ade12ec4224b..5edcae945b73 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -131,6 +131,7 @@ config X86
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
select HAVE_ARCH_VMAP_STACK if X86_64
+   select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET  if X86_64
select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 7bc105f47d21..58fd17eaca1a 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -35,6 +35,12 @@
 #define CREATE_TRACE_POINTS
 #include 
 
+#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+#include 
+
+void *__builtin_alloca(size_t size);
+#endif
+
 #ifdef CONFIG_CONTEXT_TRACKING
 /* Called on entry from user mode with IRQs off. */
 __visible inline void enter_from_user_mode(void)
@@ -273,6 +279,13 @@ __visible void do_syscall_64(unsigned long nr, struct 
pt_regs *regs)
 {
struct thread_info *ti;
 
+#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+   size_t offset = ((size_t)prandom_u32()) % 256;
+   char *ptr = __builtin_alloca(offset);
+
+   asm volatile("":"=m"(*ptr));
+#endif
+
enter_from_user_mode();
local_irq_enable();
ti = current_thread_info();
-- 
2.17.1



[PATCH v3] provide rule for finding refcounters

2017-08-16 Thread Elena Reshetova
changes in v3:
Removed unnecessary rule 4 conditions pointed out by Julia.

changes in v2:
Following the suggestion from Julia, the first rule is split into
two. The output does not differ much between these two versions,
but the rule became more precise.

Elena Reshetova (1):
  Coccinelle: add atomic_as_refcounter script

 scripts/coccinelle/api/atomic_as_refcounter.cocci | 133 ++
 1 file changed, 133 insertions(+)
 create mode 100644 scripts/coccinelle/api/atomic_as_refcounter.cocci

-- 
2.7.4



[PATCH] Coccinelle: add atomic_as_refcounter script

2017-08-16 Thread Elena Reshetova
The atomic_as_refcounter.cocci script allows detecting
cases where the refcount_t type and API should be used
instead of atomic_t.
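
As an illustration of what the script reports, rule r1 below matches code
like the following (made-up struct and function names): an atomic_t used
as a reference counter, with a free-like call on the same object after
the dec-and-test.

struct foo {
        atomic_t refcnt;        /* candidate for conversion to refcount_t */
};

static void foo_put(struct foo *f)
{
        if (atomic_dec_and_test(&f->refcnt))
                kfree(f);       /* fname =~ ".*free.*" in rule r1 */
}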

Signed-off-by: Elena Reshetova 
---
 scripts/coccinelle/api/atomic_as_refcounter.cocci | 133 ++
 1 file changed, 133 insertions(+)
 create mode 100644 scripts/coccinelle/api/atomic_as_refcounter.cocci

diff --git a/scripts/coccinelle/api/atomic_as_refcounter.cocci 
b/scripts/coccinelle/api/atomic_as_refcounter.cocci
new file mode 100644
index 000..64c97d1
--- /dev/null
+++ b/scripts/coccinelle/api/atomic_as_refcounter.cocci
@@ -0,0 +1,133 @@
+// Check if refcount_t type and API should be used
+// instead of atomic_t type when dealing with refcounters
+//
+// Copyright (c) 2016-2017, Elena Reshetova, Intel Corporation
+//
+// Confidence: Moderate
+// URL: http://coccinelle.lip6.fr/
+// Options: --include-headers --very-quiet
+
+virtual report
+
+@r1 exists@
+identifier a, x, y;
+position p1, p2;
+identifier fname =~ ".*free.*";
+identifier fname2 =~ ".*destroy.*";
+identifier fname3 =~ ".*del.*";
+identifier fname4 =~ ".*queue_work.*";
+identifier fname5 =~ ".*schedule_work.*";
+identifier fname6 =~ ".*call_rcu.*";
+
+@@
+
+(
+ atomic_dec_and_test@p1(&(a)->x)
+|
+ atomic_dec_and_lock@p1(&(a)->x, ...)
+|
+ atomic_long_dec_and_lock@p1(&(a)->x, ...)
+|
+ atomic_long_dec_and_test@p1(&(a)->x)
+|
+ atomic64_dec_and_test@p1(&(a)->x)
+|
+ local_dec_and_test@p1(&(a)->x)
+)
+...
+(
+ fname@p2(a, ...);
+|
+ fname2@p2(...);
+|
+ fname3@p2(...);
+|
+ fname4@p2(...);
+|
+ fname5@p2(...);
+|
+ fname6@p2(...);
+)
+
+
+@script:python depends on report@
+p1 << r1.p1;
+p2 << r1.p2;
+@@
+msg = "atomic_dec_and_test variation before object free at line %s."
+coccilib.report.print_report(p1[0], msg % (p2[0].line))
+
+@r4 exists@
+identifier a, x, y;
+position p1, p2;
+identifier fname =~ ".*free.*";
+
+@@
+
+(
+ atomic_dec_and_test@p1(&(a)->x)
+|
+ atomic_dec_and_lock@p1(&(a)->x, ...)
+|
+ atomic_long_dec_and_lock@p1(&(a)->x, ...)
+|
+ atomic_long_dec_and_test@p1(&(a)->x)
+|
+ atomic64_dec_and_test@p1(&(a)->x)
+|
+ local_dec_and_test@p1(&(a)->x)
+)
+...
+y=a
+...
+(
+ fname@p2(y, ...);
+)
+
+
+@script:python depends on report@
+p1 << r4.p1;
+p2 << r4.p2;
+@@
+msg = "atomic_dec_and_test variation before object free at line %s."
+coccilib.report.print_report(p1[0], msg % (p2[0].line))
+
+@r2 exists@
+identifier a, x;
+position p1;
+@@
+
+(
+atomic_add_unless(&(a)->x,-1,1)@p1
+|
+atomic_long_add_unless(&(a)->x,-1,1)@p1
+|
+atomic64_add_unless(&(a)->x,-1,1)@p1
+)
+
+@script:python depends on report@
+p1 << r2.p1;
+@@
+msg = "atomic_add_unless"
+coccilib.report.print_report(p1[0], msg)
+
+@r3 exists@
+identifier x;
+position p1;
+@@
+
+(
+x = atomic_add_return@p1(-1, ...);
+|
+x = atomic_long_add_return@p1(-1, ...);
+|
+x = atomic64_add_return@p1(-1, ...);
+)
+
+@script:python depends on report@
+p1 << r3.p1;
+@@
+msg = "x = atomic_add_return(-1, ...)"
+coccilib.report.print_report(p1[0], msg)
+
+
-- 
2.7.4



[PATCH 02/15] drivers, net, ethernet: convert mtk_eth.dma_refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable mtk_eth.dma_refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.
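
Note that, unlike atomic_t, a refcount_t must not be incremented from
zero (refcount_inc() warns in that case), so the open path cannot keep
the old unconditional increment; the first user sets the counter to 1
explicitly. In rough outline (a sketch of the resulting mtk_open() logic,
see the diff below):

        if (!refcount_read(&eth->dma_refcnt)) {
                /* first user: bring the DMA ring up, then take the first ref */
                refcount_set(&eth->dma_refcnt, 1);
        } else {
                refcount_inc(&eth->dma_refcnt);
        }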

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 8 +---
 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 4 +++-
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c 
b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 5e81a72..54adfd9 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1817,7 +1817,7 @@ static int mtk_open(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
 
/* we run 2 netdevs on the same dma ring so we only bring it up once */
-   if (!atomic_read(&eth->dma_refcnt)) {
+   if (!refcount_read(&eth->dma_refcnt)) {
int err = mtk_start_dma(eth);
 
if (err)
@@ -1827,8 +1827,10 @@ static int mtk_open(struct net_device *dev)
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+   refcount_set(&eth->dma_refcnt, 1);
}
-   atomic_inc(&eth->dma_refcnt);
+   else
+   refcount_inc(&eth->dma_refcnt);
 
phy_start(dev->phydev);
netif_start_queue(dev);
@@ -1868,7 +1870,7 @@ static int mtk_stop(struct net_device *dev)
phy_stop(dev->phydev);
 
/* only shutdown DMA if this is the last user */
-   if (!atomic_dec_and_test(&eth->dma_refcnt))
+   if (!refcount_dec_and_test(&eth->dma_refcnt))
return 0;
 
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h 
b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 3d3c24a..a3af466 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -15,6 +15,8 @@
 #ifndef MTK_ETH_H
 #define MTK_ETH_H
 
+#include 
+
 #define MTK_QDMA_PAGE_SIZE 2048
 #defineMTK_MAX_RX_LENGTH   1536
 #define MTK_TX_DMA_BUF_LEN 0x3fff
@@ -632,7 +634,7 @@ struct mtk_eth {
struct regmap   *pctl;
u32 chip_id;
boolhwlro;
-   atomic_tdma_refcnt;
+   refcount_t  dma_refcnt;
struct mtk_tx_ring  tx_ring;
struct mtk_rx_ring  rx_ring[MTK_MAX_RX_RING_NUM];
struct mtk_rx_ring  rx_ring_qdma;
-- 
2.7.4



[PATCH 05/15] drivers, net, mlx4: convert mlx4_srq.refcount from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable mlx4_srq.refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/net/ethernet/mellanox/mlx4/srq.c | 8 
 include/linux/mlx4/device.h  | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c 
b/drivers/net/ethernet/mellanox/mlx4/srq.c
index bedf521..cbe4d97 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -49,7 +49,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int 
event_type)
srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 
1));
rcu_read_unlock();
if (srq)
-   atomic_inc(&srq->refcount);
+   refcount_inc(&srq->refcount);
else {
mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
return;
@@ -57,7 +57,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int 
event_type)
 
srq->event(srq, event_type);
 
-   if (atomic_dec_and_test(&srq->refcount))
+   if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
 }
 
@@ -203,7 +203,7 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, 
u16 xrcd,
if (err)
goto err_radix;
 
-   atomic_set(&srq->refcount, 1);
+   refcount_set(&srq->refcount, 1);
init_completion(&srq->free);
 
return 0;
@@ -232,7 +232,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq 
*srq)
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
 
-   if (atomic_dec_and_test(&srq->refcount))
+   if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
 
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index b8e19c4..a9b5fed 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -781,7 +781,7 @@ struct mlx4_srq {
int max_gs;
int wqe_shift;
 
-   atomic_trefcount;
+   refcount_t  refcount;
struct completion   free;
 };
 
-- 
2.7.4



[PATCH 00/15] networking drivers refcount_t conversions

2017-10-20 Thread Elena Reshetova
Note: these are the last patches related to networking that perform
conversion of refcounters from atomic_t to refcount_t.
In contrast to the core network refcounter conversions that
were merged earlier, these are much more straightforward ones.

This series, for various networking drivers, replaces atomic_t reference
counters with the new refcount_t type and API (see include/linux/refcount.h).
By doing this we prevent intentional or accidental
underflows or overflows that can lead to use-after-free vulnerabilities.
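
For reference, a minimal sketch of the pattern each conversion in this
series follows (illustrative struct and function names; the refcount_*
calls are the real include/linux/refcount.h API):

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
        refcount_t refcnt;              /* was: atomic_t refcnt; */
};

static void foo_init(struct foo *f)
{
        refcount_set(&f->refcnt, 1);    /* was: atomic_set(&f->refcnt, 1); */
}

static bool foo_get(struct foo *f)
{
        /* was: atomic_inc_not_zero() / atomic_add_unless(.., 1, 0) */
        return refcount_inc_not_zero(&f->refcnt);
}

static void foo_put(struct foo *f)
{
        /* was: atomic_dec_and_test(); refcount_t saturates instead of
         * wrapping on overflow, turning a potential UAF into a WARN */
        if (refcount_dec_and_test(&f->refcnt))
                kfree(f);
}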

The patches are fully independent and can be cherry-picked separately.
Patches are based on top of net-next.
If there are no objections to the patches, please merge them via the
respective trees.

Elena Reshetova (15):
  drivers, net, ethernet: convert clip_entry.refcnt from atomic_t to
refcount_t
  drivers, net, ethernet: convert mtk_eth.dma_refcnt from atomic_t to
refcount_t
  drivers, net, mlx4: convert mlx4_cq.refcount from atomic_t to
refcount_t
  drivers, net, mlx4: convert mlx4_qp.refcount from atomic_t to
refcount_t
  drivers, net, mlx4: convert mlx4_srq.refcount from atomic_t to
refcount_t
  drivers, net, mlx5: convert mlx5_cq.refcount from atomic_t to
refcount_t
  drivers, net, mlx5: convert fs_node.refcount from atomic_t to
refcount_t
  drivers, net, hamradio: convert sixpack.refcnt from atomic_t to
refcount_t
  drivers, net: convert macsec_rx_sa.refcnt from atomic_t to refcount_t
  drivers, net: convert macsec_rx_sc.refcnt from atomic_t to refcount_t
  drivers, net: convert macsec_tx_sa.refcnt from atomic_t to refcount_t
  drivers, net, ppp: convert asyncppp.refcnt from atomic_t to refcount_t
  drivers, net, ppp: convert ppp_file.refcnt from atomic_t to refcount_t
  drivers, net, ppp: convert syncppp.refcnt from atomic_t to refcount_t
  drivers, connector: convert cn_callback_entry.refcnt from atomic_t to
refcount_t

 drivers/connector/cn_queue.c  |  4 ++--
 drivers/connector/connector.c |  2 +-
 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 13 +--
 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h |  4 +++-
 drivers/net/ethernet/mediatek/mtk_eth_soc.c   |  8 ---
 drivers/net/ethernet/mediatek/mtk_eth_soc.h   |  4 +++-
 drivers/net/ethernet/mellanox/mlx4/cq.c   |  8 +++
 drivers/net/ethernet/mellanox/mlx4/qp.c   |  8 +++
 drivers/net/ethernet/mellanox/mlx4/srq.c  |  8 +++
 drivers/net/ethernet/mellanox/mlx5/core/cq.c  | 16 ++---
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 28 +++
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h |  3 ++-
 drivers/net/hamradio/6pack.c  | 12 +-
 drivers/net/macsec.c  | 25 ++--
 drivers/net/ppp/ppp_async.c   | 10 
 drivers/net/ppp/ppp_generic.c | 21 +
 drivers/net/ppp/ppp_synctty.c | 11 +
 include/linux/connector.h |  4 ++--
 include/linux/mlx4/device.h   |  8 +++
 include/linux/mlx5/cq.h   |  4 ++--
 20 files changed, 105 insertions(+), 96 deletions(-)

-- 
2.7.4



[PATCH 01/15] drivers, net, ethernet: convert clip_entry.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable clip_entry.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 13 ++---
 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h |  4 +++-
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c 
b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 3103ef9..2900390 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -96,7 +96,8 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 
*lip, u8 v6)
if (!ret) {
ce = cte;
read_unlock_bh(&ctbl->lock);
-   goto found;
+   refcount_inc(&ce->refcnt);
+   return 0;
}
}
read_unlock_bh(&ctbl->lock);
@@ -108,7 +109,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 
*lip, u8 v6)
list_del(&ce->list);
INIT_LIST_HEAD(&ce->list);
spin_lock_init(&ce->lock);
-   atomic_set(&ce->refcnt, 0);
+   refcount_set(&ce->refcnt, 0);
atomic_dec(&ctbl->nfree);
list_add_tail(&ce->list, &ctbl->hash_list[hash]);
if (v6) {
@@ -138,9 +139,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 
*lip, u8 v6)
return -ENOMEM;
}
write_unlock_bh(&ctbl->lock);
-found:
-   atomic_inc(&ce->refcnt);
-
+   refcount_set(&ce->refcnt, 1);
return 0;
 }
 EXPORT_SYMBOL(cxgb4_clip_get);
@@ -179,7 +178,7 @@ void cxgb4_clip_release(const struct net_device *dev, const 
u32 *lip, u8 v6)
 found:
write_lock_bh(&ctbl->lock);
spin_lock_bh(&ce->lock);
-   if (atomic_dec_and_test(&ce->refcnt)) {
+   if (refcount_dec_and_test(&ce->refcnt)) {
list_del(&ce->list);
INIT_LIST_HEAD(&ce->list);
list_add_tail(&ce->list, &ctbl->ce_free_head);
@@ -266,7 +265,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
ip[0] = '\0';
sprintf(ip, "%pISc", &ce->addr);
seq_printf(seq, "%-25s   %u\n", ip,
-  atomic_read(&ce->refcnt));
+  refcount_read(&ce->refcnt));
}
}
seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree));
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h 
b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 35eb43c..a0e0ae1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -10,9 +10,11 @@
  *  release for licensing terms and conditions.
  */
 
+#include 
+
 struct clip_entry {
spinlock_t lock;/* Hold while modifying clip reference */
-   atomic_t refcnt;
+   refcount_t refcnt;
struct list_head list;
union {
struct sockaddr_in addr;
-- 
2.7.4



[PATCH 06/15] drivers, net, mlx5: convert mlx5_cq.refcount from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable mlx5_cq.refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/net/ethernet/mellanox/mlx5/core/cq.c | 16 
 include/linux/mlx5/cq.h  |  4 ++--
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c 
b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 336d473..1016e05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -58,7 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data)
 tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list);
mcq->tasklet_ctx.comp(mcq);
-   if (atomic_dec_and_test(&mcq->refcount))
+   if (refcount_dec_and_test(&mcq->refcount))
complete(&mcq->free);
if (time_after(jiffies, end))
break;
@@ -80,7 +80,7 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
 * still arrive.
 */
if (list_empty_careful(&cq->tasklet_ctx.list)) {
-   atomic_inc(&cq->refcount);
+   refcount_inc(&cq->refcount);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
}
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
@@ -94,7 +94,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
spin_lock(&table->lock);
cq = radix_tree_lookup(&table->tree, cqn);
if (likely(cq))
-   atomic_inc(&cq->refcount);
+   refcount_inc(&cq->refcount);
spin_unlock(&table->lock);
 
if (!cq) {
@@ -106,7 +106,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
 
cq->comp(cq);
 
-   if (atomic_dec_and_test(&cq->refcount))
+   if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
 }
 
@@ -119,7 +119,7 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int 
event_type)
 
cq = radix_tree_lookup(&table->tree, cqn);
if (cq)
-   atomic_inc(&cq->refcount);
+   refcount_inc(&cq->refcount);
 
spin_unlock(&table->lock);
 
@@ -130,7 +130,7 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int 
event_type)
 
cq->event(cq, event_type);
 
-   if (atomic_dec_and_test(&cq->refcount))
+   if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
 }
 
@@ -159,7 +159,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct 
mlx5_core_cq *cq,
cq->cqn = MLX5_GET(create_cq_out, out, cqn);
cq->cons_index = 0;
cq->arm_sn = 0;
-   atomic_set(&cq->refcount, 1);
+   refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
if (!cq->comp)
cq->comp = mlx5_add_cq_to_tasklet;
@@ -222,7 +222,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct 
mlx5_core_cq *cq)
synchronize_irq(cq->irqn);
 
mlx5_debug_cq_remove(dev, cq);
-   if (atomic_dec_and_test(&cq->refcount))
+   if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
 
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 9589884..6a57ec2 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -35,7 +35,7 @@
 
 #include 
 #include 
-
+#include 
 
 struct mlx5_core_cq {
u32 cqn;
@@ -43,7 +43,7 @@ struct mlx5_core_cq {
__be32 *set_ci_db;
__be32 *arm_db;
struct mlx5_uars_page  *uar;
-   atomic_trefcount;
+   refcount_t  refcount;
struct completion   free;
unsignedvector;
unsigned intirqn;
-- 
2.7.4



[PATCH 11/15] drivers, net: convert macsec_tx_sa.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable macsec_tx_sa.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/net/macsec.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index e0aeb51..8948b6a 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -188,7 +188,7 @@ struct macsec_tx_sa {
struct macsec_key key;
spinlock_t lock;
u32 next_pn;
-   atomic_t refcnt;
+   refcount_t refcnt;
bool active;
struct macsec_tx_sa_stats __percpu *stats;
struct rcu_head rcu;
@@ -362,7 +362,7 @@ static struct macsec_tx_sa *macsec_txsa_get(struct 
macsec_tx_sa __rcu *ptr)
if (!sa || !sa->active)
return NULL;
 
-   if (!atomic_inc_not_zero(&sa->refcnt))
+   if (!refcount_inc_not_zero(&sa->refcnt))
return NULL;
 
return sa;
@@ -379,7 +379,7 @@ static void free_txsa(struct rcu_head *head)
 
 static void macsec_txsa_put(struct macsec_tx_sa *sa)
 {
-   if (atomic_dec_and_test(&sa->refcnt))
+   if (refcount_dec_and_test(&sa->refcnt))
call_rcu(&sa->rcu, free_txsa);
 }
 
@@ -1437,7 +1437,7 @@ static int init_tx_sa(struct macsec_tx_sa *tx_sa, char 
*sak, int key_len,
}
 
tx_sa->active = false;
-   atomic_set(&tx_sa->refcnt, 1);
+   refcount_set(&tx_sa->refcnt, 1);
spin_lock_init(&tx_sa->lock);
 
return 0;
-- 
2.7.4



[PATCH 08/15] drivers, net, hamradio: convert sixpack.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable sixpack.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/net/hamradio/6pack.c | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index bbc7b78..32f49c4 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -35,7 +35,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 
 #define SIXPACK_VERSION"Revision: 0.3.0"
 
@@ -120,7 +120,7 @@ struct sixpack {
 
struct timer_list   tx_t;
struct timer_list   resync_t;
-   atomic_trefcnt;
+   refcount_t  refcnt;
struct semaphoredead_sem;
spinlock_t  lock;
 };
@@ -381,7 +381,7 @@ static struct sixpack *sp_get(struct tty_struct *tty)
read_lock(&disc_data_lock);
sp = tty->disc_data;
if (sp)
-   atomic_inc(&sp->refcnt);
+   refcount_inc(&sp->refcnt);
read_unlock(&disc_data_lock);
 
return sp;
@@ -389,7 +389,7 @@ static struct sixpack *sp_get(struct tty_struct *tty)
 
 static void sp_put(struct sixpack *sp)
 {
-   if (atomic_dec_and_test(&sp->refcnt))
+   if (refcount_dec_and_test(&sp->refcnt))
up(&sp->dead_sem);
 }
 
@@ -576,7 +576,7 @@ static int sixpack_open(struct tty_struct *tty)
sp->dev = dev;
 
spin_lock_init(&sp->lock);
-   atomic_set(&sp->refcnt, 1);
+   refcount_set(&sp->refcnt, 1);
sema_init(&sp->dead_sem, 0);
 
/* !!! length of the buffers. MTU is IP MTU, not PACLEN!  */
@@ -670,7 +670,7 @@ static void sixpack_close(struct tty_struct *tty)
 * We have now ensured that nobody can start using ap from now on, but
 * we have to wait for all existing users to finish.
 */
-   if (!atomic_dec_and_test(&sp->refcnt))
+   if (!refcount_dec_and_test(&sp->refcnt))
down(&sp->dead_sem);
 
/* We must stop the queue to avoid potentially scribbling
-- 
2.7.4



[PATCH 13/15] drivers, net, ppp: convert ppp_file.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable ppp_file.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/net/ppp/ppp_generic.c | 21 +++--
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index e365866..6566107 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -51,6 +51,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -84,7 +85,7 @@ struct ppp_file {
struct sk_buff_head xq; /* pppd transmit queue */
struct sk_buff_head rq; /* receive queue for pppd */
wait_queue_head_t rwait;/* for poll on reading /dev/ppp */
-   atomic_trefcnt; /* # refs (incl /dev/ppp attached) */
+   refcount_t  refcnt; /* # refs (incl /dev/ppp attached) */
int hdrlen; /* space to leave for headers */
int index;  /* interface unit / channel number */
int dead;   /* unit/channel has been shut down */
@@ -408,7 +409,7 @@ static int ppp_release(struct inode *unused, struct file 
*file)
unregister_netdevice(ppp->dev);
rtnl_unlock();
}
-   if (atomic_dec_and_test(&pf->refcnt)) {
+   if (refcount_dec_and_test(&pf->refcnt)) {
switch (pf->kind) {
case INTERFACE:
ppp_destroy_interface(PF_TO_PPP(pf));
@@ -881,7 +882,7 @@ static int ppp_unattached_ioctl(struct net *net, struct 
ppp_file *pf,
mutex_lock(&pn->all_ppp_mutex);
ppp = ppp_find_unit(pn, unit);
if (ppp) {
-   atomic_inc(&ppp->file.refcnt);
+   refcount_inc(&ppp->file.refcnt);
file->private_data = &ppp->file;
err = 0;
}
@@ -896,7 +897,7 @@ static int ppp_unattached_ioctl(struct net *net, struct 
ppp_file *pf,
spin_lock_bh(&pn->all_channels_lock);
chan = ppp_find_channel(pn, unit);
if (chan) {
-   atomic_inc(&chan->file.refcnt);
+   refcount_inc(&chan->file.refcnt);
file->private_data = &chan->file;
err = 0;
}
@@ -1348,7 +1349,7 @@ static int ppp_dev_init(struct net_device *dev)
 * that ppp_destroy_interface() won't run before the device gets
 * unregistered.
 */
-   atomic_inc(&ppp->file.refcnt);
+   refcount_inc(&ppp->file.refcnt);
 
return 0;
 }
@@ -1377,7 +1378,7 @@ static void ppp_dev_priv_destructor(struct net_device 
*dev)
struct ppp *ppp;
 
ppp = netdev_priv(dev);
-   if (atomic_dec_and_test(&ppp->file.refcnt))
+   if (refcount_dec_and_test(&ppp->file.refcnt))
ppp_destroy_interface(ppp);
 }
 
@@ -2676,7 +2677,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
 
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
-   if (atomic_dec_and_test(&pch->file.refcnt))
+   if (refcount_dec_and_test(&pch->file.refcnt))
ppp_destroy_channel(pch);
 }
 
@@ -3046,7 +3047,7 @@ init_ppp_file(struct ppp_file *pf, int kind)
pf->kind = kind;
skb_queue_head_init(&pf->xq);
skb_queue_head_init(&pf->rq);
-   atomic_set(&pf->refcnt, 1);
+   refcount_set(&pf->refcnt, 1);
init_waitqueue_head(&pf->rwait);
 }
 
@@ -3164,7 +3165,7 @@ ppp_connect_channel(struct channel *pch, int unit)
list_add_tail(&pch->clist, &ppp->channels);
++ppp->n_channels;
pch->ppp = ppp;
-   atomic_inc(&ppp->file.refcnt);
+   refcount_inc(&ppp->file.refcnt);
ppp_unlock(ppp);
ret = 0;
 
@@ -3195,7 +3196,7 @@ ppp_disconnect_channel(struct channel *pch)
if (--ppp-&

[PATCH 15/15] drivers, connector: convert cn_callback_entry.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable cn_callback_entry.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/connector/cn_queue.c  | 4 ++--
 drivers/connector/connector.c | 2 +-
 include/linux/connector.h | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 1f8bf05..9c54fdf 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -45,7 +45,7 @@ cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const 
char *name,
return NULL;
}
 
-   atomic_set(&cbq->refcnt, 1);
+   refcount_set(&cbq->refcnt, 1);
 
atomic_inc(&dev->refcnt);
cbq->pdev = dev;
@@ -58,7 +58,7 @@ cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const 
char *name,
 
 void cn_queue_release_callback(struct cn_callback_entry *cbq)
 {
-   if (!atomic_dec_and_test(&cbq->refcnt))
+   if (!refcount_dec_and_test(&cbq->refcnt))
return;
 
atomic_dec(&cbq->pdev->refcnt);
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 25693b0..8615594b 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -157,7 +157,7 @@ static int cn_call_callback(struct sk_buff *skb)
spin_lock_bh(&dev->cbdev->queue_lock);
list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
if (cn_cb_equal(&i->id.id, &msg->id)) {
-   atomic_inc(&i->refcnt);
+   refcount_inc(&i->refcnt);
cbq = i;
break;
}
diff --git a/include/linux/connector.h b/include/linux/connector.h
index f8fe863..032102b 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -22,7 +22,7 @@
 #define __CONNECTOR_H
 
 
-#include 
+#include 
 
 #include 
 #include 
@@ -49,7 +49,7 @@ struct cn_callback_id {
 
 struct cn_callback_entry {
struct list_head callback_entry;
-   atomic_t refcnt;
+   refcount_t refcnt;
struct cn_queue_dev *pdev;
 
struct cn_callback_id id;
-- 
2.7.4



[PATCH 14/15] drivers, net, ppp: convert syncppp.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable syncppp.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/net/ppp/ppp_synctty.c | 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 7868c29..7196f00 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -46,6 +46,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -72,7 +73,7 @@ struct syncppp {
 
struct tasklet_struct tsk;
 
-   atomic_trefcnt;
+   refcount_t  refcnt;
struct completion dead_cmp;
struct ppp_channel chan;/* interface to generic ppp layer */
 };
@@ -141,14 +142,14 @@ static struct syncppp *sp_get(struct tty_struct *tty)
read_lock(&disc_data_lock);
ap = tty->disc_data;
if (ap != NULL)
-   atomic_inc(&ap->refcnt);
+   refcount_inc(&ap->refcnt);
read_unlock(&disc_data_lock);
return ap;
 }
 
 static void sp_put(struct syncppp *ap)
 {
-   if (atomic_dec_and_test(&ap->refcnt))
+   if (refcount_dec_and_test(&ap->refcnt))
complete(&ap->dead_cmp);
 }
 
@@ -182,7 +183,7 @@ ppp_sync_open(struct tty_struct *tty)
skb_queue_head_init(&ap->rqueue);
tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap);
 
-   atomic_set(&ap->refcnt, 1);
+   refcount_set(&ap->refcnt, 1);
init_completion(&ap->dead_cmp);
 
ap->chan.private = ap;
@@ -232,7 +233,7 @@ ppp_sync_close(struct tty_struct *tty)
 * our channel ops (i.e. ppp_sync_send/ioctl) are in progress
 * by the time it returns.
 */
-   if (!atomic_dec_and_test(&ap->refcnt))
+   if (!refcount_dec_and_test(&ap->refcnt))
wait_for_completion(&ap->dead_cmp);
tasklet_kill(&ap->tsk);
 
-- 
2.7.4



[PATCH 12/15] drivers, net, ppp: convert asyncppp.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable asyncppp.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/net/ppp/ppp_async.c | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 814fd8f..1b28e6e 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -69,7 +69,7 @@ struct asyncppp {
 
struct tasklet_struct tsk;
 
-   atomic_trefcnt;
+   refcount_t  refcnt;
struct semaphore dead_sem;
struct ppp_channel chan;/* interface to generic ppp layer */
unsigned char   obuf[OBUFSIZE];
@@ -140,14 +140,14 @@ static struct asyncppp *ap_get(struct tty_struct *tty)
read_lock(&disc_data_lock);
ap = tty->disc_data;
if (ap != NULL)
-   atomic_inc(&ap->refcnt);
+   refcount_inc(&ap->refcnt);
read_unlock(&disc_data_lock);
return ap;
 }
 
 static void ap_put(struct asyncppp *ap)
 {
-   if (atomic_dec_and_test(&ap->refcnt))
+   if (refcount_dec_and_test(&ap->refcnt))
up(&ap->dead_sem);
 }
 
@@ -185,7 +185,7 @@ ppp_asynctty_open(struct tty_struct *tty)
skb_queue_head_init(&ap->rqueue);
tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
 
-   atomic_set(&ap->refcnt, 1);
+   refcount_set(&ap->refcnt, 1);
sema_init(&ap->dead_sem, 0);
 
ap->chan.private = ap;
@@ -234,7 +234,7 @@ ppp_asynctty_close(struct tty_struct *tty)
 * our channel ops (i.e. ppp_async_send/ioctl) are in progress
 * by the time it returns.
 */
-   if (!atomic_dec_and_test(&ap->refcnt))
+   if (!refcount_dec_and_test(&ap->refcnt))
down(&ap->dead_sem);
tasklet_kill(&ap->tsk);
 
-- 
2.7.4



[PATCH 09/15] drivers, net: convert macsec_rx_sa.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable macsec_rx_sa.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/net/macsec.c | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index ccbe4eaf..733e1c2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -16,6 +16,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -146,7 +147,7 @@ struct macsec_rx_sa {
struct macsec_key key;
spinlock_t lock;
u32 next_pn;
-   atomic_t refcnt;
+   refcount_t refcnt;
bool active;
struct macsec_rx_sa_stats __percpu *stats;
struct macsec_rx_sc *sc;
@@ -314,7 +315,7 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct 
macsec_rx_sa __rcu *ptr)
if (!sa || !sa->active)
return NULL;
 
-   if (!atomic_inc_not_zero(&sa->refcnt))
+   if (!refcount_inc_not_zero(&sa->refcnt))
return NULL;
 
return sa;
@@ -350,7 +351,7 @@ static void free_rxsa(struct rcu_head *head)
 
 static void macsec_rxsa_put(struct macsec_rx_sa *sa)
 {
-   if (atomic_dec_and_test(&sa->refcnt))
+   if (refcount_dec_and_test(&sa->refcnt))
call_rcu(&sa->rcu, free_rxsa);
 }
 
@@ -1339,7 +1340,7 @@ static int init_rx_sa(struct macsec_rx_sa *rx_sa, char 
*sak, int key_len,
 
rx_sa->active = false;
rx_sa->next_pn = 1;
-   atomic_set(&rx_sa->refcnt, 1);
+   refcount_set(&rx_sa->refcnt, 1);
spin_lock_init(&rx_sa->lock);
 
return 0;
-- 
2.7.4



[PATCH 10/15] drivers, net: convert macsec_rx_sc.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable macsec_rx_sc.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/net/macsec.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 733e1c2..e0aeb51 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -172,7 +172,7 @@ struct macsec_rx_sc {
bool active;
struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
struct pcpu_rx_sc_stats __percpu *stats;
-   atomic_t refcnt;
+   refcount_t refcnt;
struct rcu_head rcu_head;
 };
 
@@ -331,12 +331,12 @@ static void free_rx_sc_rcu(struct rcu_head *head)
 
 static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
 {
-   return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL;
+   return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
 }
 
 static void macsec_rxsc_put(struct macsec_rx_sc *sc)
 {
-   if (atomic_dec_and_test(&sc->refcnt))
+   if (refcount_dec_and_test(&sc->refcnt))
call_rcu(&sc->rcu_head, free_rx_sc_rcu);
 }
 
@@ -1411,7 +1411,7 @@ static struct macsec_rx_sc *create_rx_sc(struct 
net_device *dev, sci_t sci)
 
rx_sc->sci = sci;
rx_sc->active = true;
-   atomic_set(&rx_sc->refcnt, 1);
+   refcount_set(&rx_sc->refcnt, 1);
 
secy = &macsec_priv(dev)->secy;
rcu_assign_pointer(rx_sc->next, secy->rx_sc);
-- 
2.7.4



[PATCH 07/15] drivers, net, mlx5: convert fs_node.refcount from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable fs_node.refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.
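
One mapping worth noting in this patch: atomic_add_unless(&node->refcount,
1, 0) becomes refcount_inc_not_zero(&node->refcount), the refcount_t idiom
for "take a reference only if the object is still alive":

        /* tree_get_node() after the conversion (see the diff below) */
        static int tree_get_node(struct fs_node *node)
        {
                return refcount_inc_not_zero(&node->refcount);
        }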

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 28 +++
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h |  3 ++-
 2 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 
b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f77e496..c7fa1389 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -188,7 +188,7 @@ static void tree_init_node(struct fs_node *node,
   void (*del_hw_func)(struct fs_node *),
   void (*del_sw_func)(struct fs_node *))
 {
-   atomic_set(&node->refcount, 1);
+   refcount_set(&node->refcount, 1);
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->children);
init_rwsem(&node->lock);
@@ -200,7 +200,7 @@ static void tree_init_node(struct fs_node *node,
 static void tree_add_node(struct fs_node *node, struct fs_node *parent)
 {
if (parent)
-   atomic_inc(&parent->refcount);
+   refcount_inc(&parent->refcount);
node->parent = parent;
 
/* Parent is the root */
@@ -212,7 +212,7 @@ static void tree_add_node(struct fs_node *node, struct 
fs_node *parent)
 
 static int tree_get_node(struct fs_node *node)
 {
-   return atomic_add_unless(&node->refcount, 1, 0);
+   return refcount_inc_not_zero(&node->refcount);
 }
 
 static void nested_down_read_ref_node(struct fs_node *node,
@@ -220,7 +220,7 @@ static void nested_down_read_ref_node(struct fs_node *node,
 {
if (node) {
down_read_nested(&node->lock, class);
-   atomic_inc(&node->refcount);
+   refcount_inc(&node->refcount);
}
 }
 
@@ -229,7 +229,7 @@ static void nested_down_write_ref_node(struct fs_node *node,
 {
if (node) {
down_write_nested(&node->lock, class);
-   atomic_inc(&node->refcount);
+   refcount_inc(&node->refcount);
}
 }
 
@@ -237,19 +237,19 @@ static void down_write_ref_node(struct fs_node *node)
 {
if (node) {
down_write(&node->lock);
-   atomic_inc(&node->refcount);
+   refcount_inc(&node->refcount);
}
 }
 
 static void up_read_ref_node(struct fs_node *node)
 {
-   atomic_dec(&node->refcount);
+   refcount_dec(&node->refcount);
up_read(&node->lock);
 }
 
 static void up_write_ref_node(struct fs_node *node)
 {
-   atomic_dec(&node->refcount);
+   refcount_dec(&node->refcount);
up_write(&node->lock);
 }
 
@@ -257,7 +257,7 @@ static void tree_put_node(struct fs_node *node)
 {
struct fs_node *parent_node = node->parent;
 
-   if (atomic_dec_and_test(&node->refcount)) {
+   if (refcount_dec_and_test(&node->refcount)) {
if (node->del_hw_func)
node->del_hw_func(node);
if (parent_node) {
@@ -280,8 +280,8 @@ static void tree_put_node(struct fs_node *node)
 
 static int tree_remove_node(struct fs_node *node)
 {
-   if (atomic_read(&node->refcount) > 1) {
-   atomic_dec(&node->refcount);
+   if (refcount_read(&node->refcount) > 1) {
+   refcount_dec(&node->refcount);
return -EEXIST;
}
tree_put_node(node);
@@ -1184,7 +1184,7 @@ static void destroy_flow_handle(struct fs_fte *fte,
int i)
 {
for (; --i >= 0;) {
-   if (atomic_dec_and_test(&handle->rule[i]->node.refcount)) {
+   if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
fte->dests_size--;
list_del(&handle->rule[i]->node.list);
kfree(handle->rule[i]);
@@ -1215,7 +1215,7 @@ create_flow_handle(struc
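
The tree_get_node() hunk above is the interesting one: atomic_add_unless(&node->refcount, 1, 0) becomes refcount_inc_not_zero(), which takes a reference only if the counter has not already dropped to zero. A rough sketch of the lookup pattern this supports, with hypothetical names (struct foo, foo_find() and table_lock are assumptions, not the mlx5 code):

#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo {
	u32 id;
	refcount_t refcnt;
};

static DEFINE_SPINLOCK(table_lock);
static struct foo *foo_find(u32 id);	/* hypothetical table search */

static struct foo *foo_lookup(u32 id)
{
	struct foo *f;

	spin_lock(&table_lock);
	f = foo_find(id);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;	/* last reference is already being dropped */
	spin_unlock(&table_lock);

	return f;
}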

[PATCH 04/15] drivers, net, mlx4: convert mlx4_qp.refcount from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable mlx4_qp.refcount is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/net/ethernet/mellanox/mlx4/qp.c | 8 
 include/linux/mlx4/device.h | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2033209..769598f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -55,7 +55,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int 
event_type)
 
qp = __mlx4_qp_lookup(dev, qpn);
if (qp)
-   atomic_inc(&qp->refcount);
+   refcount_inc(&qp->refcount);
 
spin_unlock(&qp_table->lock);
 
@@ -66,7 +66,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int 
event_type)
 
qp->event(qp, event_type);
 
-   if (atomic_dec_and_test(&qp->refcount))
+   if (refcount_dec_and_test(&qp->refcount))
complete(&qp->free);
 }
 
@@ -420,7 +420,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct 
mlx4_qp *qp)
if (err)
goto err_icm;
 
-   atomic_set(&qp->refcount, 1);
+   refcount_set(&qp->refcount, 1);
init_completion(&qp->free);
 
return 0;
@@ -520,7 +520,7 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
 
 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
-   if (atomic_dec_and_test(&qp->refcount))
+   if (refcount_dec_and_test(&qp->refcount))
complete(&qp->free);
wait_for_completion(&qp->free);
 
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index daac2e3..b8e19c4 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -768,7 +768,7 @@ struct mlx4_qp {
 
int qpn;
 
-   atomic_trefcount;
+   refcount_t  refcount;
struct completion   free;
u8  usage;
 };
-- 
2.7.4
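
This conversion (and the matching mlx4_cq one elsewhere in the series) keeps the driver's existing teardown idiom: the structure embeds a completion, an event handler that took a temporary reference signals it when it drops the last one, and the free path waits for that before releasing resources. A reduced sketch of the idiom with hypothetical names:

#include <linux/refcount.h>
#include <linux/completion.h>

struct foo {
	refcount_t		refcount;
	struct completion	free;
};

static void foo_init(struct foo *f)
{
	refcount_set(&f->refcount, 1);
	init_completion(&f->free);
}

/* temporary user, e.g. an event handler */
static void foo_event(struct foo *f)
{
	refcount_inc(&f->refcount);
	/* ... use f ... */
	if (refcount_dec_and_test(&f->refcount))
		complete(&f->free);
}

/* owner tearing the object down */
static void foo_destroy(struct foo *f)
{
	if (refcount_dec_and_test(&f->refcount))
		complete(&f->free);
	wait_for_completion(&f->free);
	/* no concurrent users remain; release resources here */
}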



[PATCH 03/15] drivers, net, mlx4: convert mlx4_cq.refcount from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable mlx4_cq.refcount is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/net/ethernet/mellanox/mlx4/cq.c | 8 
 include/linux/mlx4/device.h | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 72eb50c..d8e9a32 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -69,7 +69,7 @@ void mlx4_cq_tasklet_cb(unsigned long data)
list_for_each_entry_safe(mcq, temp, &ctx->process_list, 
tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list);
mcq->tasklet_ctx.comp(mcq);
-   if (atomic_dec_and_test(&mcq->refcount))
+   if (refcount_dec_and_test(&mcq->refcount))
complete(&mcq->free);
if (time_after(jiffies, end))
break;
@@ -92,7 +92,7 @@ static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
 * still arrive.
 */
if (list_empty_careful(&cq->tasklet_ctx.list)) {
-   atomic_inc(&cq->refcount);
+   refcount_inc(&cq->refcount);
kick = list_empty(&tasklet_ctx->list);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
if (kick)
@@ -344,7 +344,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq->cons_index = 0;
cq->arm_sn = 1;
cq->uar= uar;
-   atomic_set(&cq->refcount, 1);
+   refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
cq->comp = mlx4_add_cq_to_tasklet;
cq->tasklet_ctx.priv =
@@ -386,7 +386,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
 
-   if (atomic_dec_and_test(&cq->refcount))
+   if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
 
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index b0a57e0..daac2e3 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -40,7 +40,7 @@
 #include 
 #include 
 
-#include 
+#include 
 
 #include 
 
@@ -751,7 +751,7 @@ struct mlx4_cq {
int cqn;
unsignedvector;
 
-   atomic_trefcount;
+   refcount_t  refcount;
struct completion   free;
struct {
struct list_head list;
-- 
2.7.4



[PATCH 2/4] dm cache: convert dm_cache_metadata.ref_count from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable dm_cache_metadata.ref_count is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/md/dm-cache-metadata.c | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 4a4e9c7..0d72124 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -13,6 +13,7 @@
 #include "persistent-data/dm-transaction-manager.h"
 
 #include 
+#include 
 
 /**/
 
@@ -100,7 +101,7 @@ struct cache_disk_superblock {
 } __packed;
 
 struct dm_cache_metadata {
-   atomic_t ref_count;
+   refcount_t ref_count;
struct list_head list;
 
unsigned version;
@@ -753,7 +754,7 @@ static struct dm_cache_metadata *metadata_open(struct 
block_device *bdev,
}
 
cmd->version = metadata_version;
-   atomic_set(&cmd->ref_count, 1);
+   refcount_set(&cmd->ref_count, 1);
init_rwsem(&cmd->root_lock);
cmd->bdev = bdev;
cmd->data_block_size = data_block_size;
@@ -791,7 +792,7 @@ static struct dm_cache_metadata *lookup(struct block_device 
*bdev)
 
list_for_each_entry(cmd, &table, list)
if (cmd->bdev == bdev) {
-   atomic_inc(&cmd->ref_count);
+   refcount_inc(&cmd->ref_count);
return cmd;
}
 
@@ -862,7 +863,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct 
block_device *bdev,
 
 void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
 {
-   if (atomic_dec_and_test(&cmd->ref_count)) {
+   if (refcount_dec_and_test(&cmd->ref_count)) {
mutex_lock(&table_lock);
list_del(&cmd->list);
mutex_unlock(&table_lock);
-- 
2.7.4
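
metadata_open() and lookup() above are the usual "find an existing instance and take a reference, otherwise create one with the count already at 1" shape. A minimal sketch of that shape, using a hypothetical cache of foo objects keyed by block device number and guarded by a mutex (not the dm code):

#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo {
	struct list_head list;
	dev_t dev;
	refcount_t ref_count;
};

static LIST_HEAD(foo_cache);
static DEFINE_MUTEX(foo_cache_lock);

static struct foo *foo_open(dev_t dev)
{
	struct foo *f;

	mutex_lock(&foo_cache_lock);
	list_for_each_entry(f, &foo_cache, list) {
		if (f->dev == dev) {
			refcount_inc(&f->ref_count);	/* existing instance */
			goto out;
		}
	}
	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (f) {
		f->dev = dev;
		refcount_set(&f->ref_count, 1);		/* creator's reference */
		list_add(&f->list, &foo_cache);
	}
out:
	mutex_unlock(&foo_cache_lock);
	return f;
}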



[PATCH 3/4] dm: convert dm_dev_internal.count from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable dm_dev_internal.count is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/md/dm-table.c | 6 +++---
 drivers/md/dm.h   | 3 ++-
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ef7b8f2..fc7d240 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -451,15 +451,15 @@ int dm_get_device(struct dm_target *ti, const char *path, 
fmode_t mode,
return r;
}
 
-   atomic_set(&dd->count, 0);
+   refcount_set(&dd->count, 1);
list_add(&dd->list, &t->devices);
 
} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
return r;
+   refcount_inc(&dd->count);
}
-   atomic_inc(&dd->count);
 
*result = dd->dm_dev;
return 0;
@@ -515,7 +515,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
   dm_device_name(ti->table->md), d->name);
return;
}
-   if (atomic_dec_and_test(&dd->count)) {
+   if (refcount_dec_and_test(&dd->count)) {
dm_put_table_device(ti->table->md, d);
list_del(&dd->list);
kfree(dd);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 38c84c0..36399bb8 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -19,6 +19,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "dm-stats.h"
 
@@ -38,7 +39,7 @@
  */
 struct dm_dev_internal {
struct list_head list;
-   atomic_t count;
+   refcount_t count;
struct dm_dev *dm_dev;
 };
 
-- 
2.7.4
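
This is the one conversion in the series that is not purely mechanical. The old code initialised dd->count to 0 and relied on an unconditional atomic_inc() afterwards; refcount_t treats an increment from zero as a potential use-after-free (and warns when full checking is enabled), so the count now starts at 1 on the "newly created" path and the increment is only performed on the "already present" path. Sketched in isolation, with hypothetical names:

#include <linux/refcount.h>
#include <linux/slab.h>

struct entry {
	refcount_t count;
};

/*
 * refcount_t has no legal 0 -> 1 transition via refcount_inc(), so the
 * creator must own the first reference and only pre-existing entries
 * are incremented.
 */
static struct entry *entry_get_or_create(struct entry *existing)
{
	if (existing) {
		refcount_inc(&existing->count);		/* n -> n + 1, never 0 -> 1 */
		return existing;
	}
	existing = kzalloc(sizeof(*existing), GFP_KERNEL);
	if (existing)
		refcount_set(&existing->count, 1);	/* was: set to 0, inc later */
	return existing;
}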



[PATCH 4/4] dm: convert table_device.count from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable table_device.count is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/md/dm.c | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4be8532..be12f3f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -24,6 +24,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define DM_MSG_PREFIX "core"
 
@@ -98,7 +99,7 @@ struct dm_md_mempools {
 
 struct table_device {
struct list_head list;
-   atomic_t count;
+   refcount_t count;
struct dm_dev dm_dev;
 };
 
@@ -685,10 +686,11 @@ int dm_get_table_device(struct mapped_device *md, dev_t 
dev, fmode_t mode,
 
format_dev_t(td->dm_dev.name, dev);
 
-   atomic_set(&td->count, 0);
+   refcount_set(&td->count, 1);
list_add(&td->list, &md->table_devices);
+   } else {
+   refcount_inc(&td->count);
}
-   atomic_inc(&td->count);
mutex_unlock(&md->table_devices_lock);
 
*result = &td->dm_dev;
@@ -701,7 +703,7 @@ void dm_put_table_device(struct mapped_device *md, struct 
dm_dev *d)
struct table_device *td = container_of(d, struct table_device, dm_dev);
 
mutex_lock(&md->table_devices_lock);
-   if (atomic_dec_and_test(&td->count)) {
+   if (refcount_dec_and_test(&td->count)) {
close_table_device(td, md);
list_del(&td->list);
kfree(td);
@@ -718,7 +720,7 @@ static void free_table_devices(struct list_head *devices)
struct table_device *td = list_entry(tmp, struct table_device, 
list);
 
DMWARN("dm_destroy: %s still exists with %d references",
-  td->dm_dev.name, atomic_read(&td->count));
+  td->dm_dev.name, refcount_read(&td->count));
kfree(td);
}
 }
-- 
2.7.4



[PATCH 0/4] dm and bcache refcount conversions

2017-10-20 Thread Elena Reshetova
This series, for dm and bcache parts, replaces atomic_t reference
counters with the new refcount_t type and API (see include/linux/refcount.h).
By doing this we prevent intentional or accidental
underflows or overflows that can lead to use-after-free vulnerabilities.

The patches are fully independent and can be cherry-picked separately.
Patches are based on top of linux-next as of yesterday.
If there are no objections to the patches, please merge them via respective 
trees.

Elena Reshetova (4):
  bcache: convert cached_dev.count from atomic_t to refcount_t
  dm cache: convert dm_cache_metadata.ref_count from atomic_t to
refcount_t
  dm: convert dm_dev_internal.count from atomic_t to refcount_t
  dm: convert table_device.count from atomic_t to refcount_t

 drivers/md/bcache/bcache.h |  7 ---
 drivers/md/bcache/super.c  |  6 +++---
 drivers/md/bcache/writeback.h  |  2 +-
 drivers/md/dm-cache-metadata.c |  9 +
 drivers/md/dm-table.c  |  6 +++---
 drivers/md/dm.c| 12 +++-
 drivers/md/dm.h|  3 ++-
 7 files changed, 25 insertions(+), 20 deletions(-)

-- 
2.7.4



[PATCH 1/4] bcache: convert cached_dev.count from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable cached_dev.count is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/md/bcache/bcache.h| 7 ---
 drivers/md/bcache/super.c | 6 +++---
 drivers/md/bcache/writeback.h | 2 +-
 3 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 2ed9bd2..8ed8ad6 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -184,6 +184,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -299,7 +300,7 @@ struct cached_dev {
struct semaphoresb_write_mutex;
 
/* Refcount on the cache set. Always nonzero when we're caching. */
-   atomic_tcount;
+   refcount_t  count;
struct work_struct  detach;
 
/*
@@ -806,13 +807,13 @@ do {  
\
 
 static inline void cached_dev_put(struct cached_dev *dc)
 {
-   if (atomic_dec_and_test(&dc->count))
+   if (refcount_dec_and_test(&dc->count))
schedule_work(&dc->detach);
 }
 
 static inline bool cached_dev_get(struct cached_dev *dc)
 {
-   if (!atomic_inc_not_zero(&dc->count))
+   if (!refcount_inc_not_zero(&dc->count))
return false;
 
/* Paired with the mb in cached_dev_attach */
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index fc0a31b..676e6f4 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -889,7 +889,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
closure_init_stack(&cl);
 
BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
-   BUG_ON(atomic_read(&dc->count));
+   BUG_ON(refcount_read(&dc->count));
 
mutex_lock(&bch_register_lock);
 
@@ -1016,7 +1016,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct 
cache_set *c)
 * dc->c must be set before dc->count != 0 - paired with the mb in
 * cached_dev_get()
 */
-   atomic_set(&dc->count, 1);
+   refcount_set(&dc->count, 1);
 
/* Block writeback thread, but spawn it */
down_write(&dc->writeback_lock);
@@ -1028,7 +1028,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct 
cache_set *c)
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
bch_sectors_dirty_init(&dc->disk);
atomic_set(&dc->has_dirty, 1);
-   atomic_inc(&dc->count);
+   refcount_inc(&dc->count);
bch_writeback_queue(dc);
}
 
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index e35421d..5937e0d 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -89,7 +89,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
 {
if (!atomic_read(&dc->has_dirty) &&
!atomic_xchg(&dc->has_dirty, 1)) {
-   atomic_inc(&dc->count);
+   refcount_inc(&dc->count);
 
if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
-- 
2.7.4
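
cached_dev_put() is the final-put pattern in which the last reference does not free the object directly, typically because the caller may hold locks the teardown path needs or may be in atomic context, so it schedules the real cleanup instead. A generic sketch with hypothetical names:

#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
	refcount_t count;
	struct work_struct release_work;
};

static void foo_release_workfn(struct work_struct *w)
{
	struct foo *f = container_of(w, struct foo, release_work);

	/* heavyweight teardown runs in process context */
	kfree(f);
}

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f) {
		refcount_set(&f->count, 1);
		INIT_WORK(&f->release_work, foo_release_workfn);
	}
	return f;
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->count))
		schedule_work(&f->release_work);
}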



[PATCH 1/3] [S390] vmur: convert urdev.ref_count from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable urdev.ref_count is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/s390/char/vmur.c | 8 
 drivers/s390/char/vmur.h | 4 +++-
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 04aceb6..ced8151 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -110,7 +110,7 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
mutex_init(&urd->io_mutex);
init_waitqueue_head(&urd->wait);
spin_lock_init(&urd->open_lock);
-   atomic_set(&urd->ref_count,  1);
+   refcount_set(&urd->ref_count,  1);
urd->cdev = cdev;
get_device(&cdev->dev);
return urd;
@@ -126,7 +126,7 @@ static void urdev_free(struct urdev *urd)
 
 static void urdev_get(struct urdev *urd)
 {
-   atomic_inc(&urd->ref_count);
+   refcount_inc(&urd->ref_count);
 }
 
 static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
@@ -159,7 +159,7 @@ static struct urdev *urdev_get_from_devno(u16 devno)
 
 static void urdev_put(struct urdev *urd)
 {
-   if (atomic_dec_and_test(&urd->ref_count))
+   if (refcount_dec_and_test(&urd->ref_count))
urdev_free(urd);
 }
 
@@ -946,7 +946,7 @@ static int ur_set_offline_force(struct ccw_device *cdev, 
int force)
rc = -EBUSY;
goto fail_urdev_put;
}
-   if (!force && (atomic_read(&urd->ref_count) > 2)) {
+   if (!force && (refcount_read(&urd->ref_count) > 2)) {
/* There is still a user of urd (e.g. ur_open) */
TRACE("ur_set_offline: BUSY\n");
rc = -EBUSY;
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
index fa320ad..35ea9d1 100644
--- a/drivers/s390/char/vmur.h
+++ b/drivers/s390/char/vmur.h
@@ -11,6 +11,8 @@
 #ifndef _VMUR_H_
 #define _VMUR_H_
 
+#include 
+
 #define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */
 #define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */
 /*
@@ -69,7 +71,7 @@ struct urdev {
size_t reclen;  /* Record length for *write* CCWs */
int class;  /* VM device class */
int io_request_rc;  /* return code from I/O request */
-   atomic_t ref_count; /* reference counter */
+   refcount_t ref_count;   /* reference counter */
wait_queue_head_t wait; /* wait queue to serialize open */
int open_flag;  /* "urdev is open" flag */
spinlock_t open_lock;   /* serialize critical sections */
-- 
2.7.4
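
ur_set_offline_force() above is one of the few places where refcount_read() is used for logic rather than diagnostics: a "does anyone beyond the expected holders still reference this device" busy check before teardown (the driver's comment points at ur_open as the typical extra user). Such a read is only meaningful because the surrounding code prevents new references from appearing while the check runs. A hedged sketch, with hypothetical names:

#include <linux/errno.h>
#include <linux/refcount.h>
#include <linux/types.h>

struct foo {
	refcount_t refcnt;
};

/*
 * Sketch: refuse to take the device offline while extra users hold it.
 * Assumes the caller already excludes new references.
 */
static int foo_set_offline(struct foo *f, bool force)
{
	/* one reference for the device core, one for this caller */
	if (!force && refcount_read(&f->refcnt) > 2)
		return -EBUSY;

	/* proceed with offline processing */
	return 0;
}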



[PATCH 3/3] [S390] qeth: convert qeth_reply.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable qeth_reply.refcnt is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/s390/net/qeth_core.h  |  3 ++-
 drivers/s390/net/qeth_core_main.c | 10 +-
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 91fcadb..49ba742 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -20,6 +20,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -645,7 +646,7 @@ struct qeth_reply {
int rc;
void *param;
struct qeth_card *card;
-   atomic_t refcnt;
+   refcount_t refcnt;
 };
 
 struct qeth_card_blkt {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 68e118f..86e4c5d 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -559,7 +559,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card 
*card)
 
reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
if (reply) {
-   atomic_set(&reply->refcnt, 1);
+   refcount_set(&reply->refcnt, 1);
atomic_set(&reply->received, 0);
reply->card = card;
}
@@ -568,14 +568,14 @@ static struct qeth_reply *qeth_alloc_reply(struct 
qeth_card *card)
 
 static void qeth_get_reply(struct qeth_reply *reply)
 {
-   WARN_ON(atomic_read(&reply->refcnt) <= 0);
-   atomic_inc(&reply->refcnt);
+   WARN_ON(refcount_read(&reply->refcnt) == 0);
+   refcount_inc(&reply->refcnt);
 }
 
 static void qeth_put_reply(struct qeth_reply *reply)
 {
-   WARN_ON(atomic_read(&reply->refcnt) <= 0);
-   if (atomic_dec_and_test(&reply->refcnt))
+   WARN_ON(refcount_read(&reply->refcnt) == 0);
+   if (refcount_dec_and_test(&reply->refcnt))
kfree(reply);
 }
 
-- 
2.7.4
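
The WARN_ON changes above carry a small semantic point worth spelling out: atomic_read() returns a signed int, so an underflow could show up as a negative value and the old "<= 0" check caught it, whereas refcount_read() returns an unsigned int, so a negative value can never be observed and "== 0" is the only state left worth asserting before taking a reference. In sketch form, with a hypothetical struct foo:

#include <linux/bug.h>
#include <linux/refcount.h>

struct foo {
	refcount_t refcnt;
};

static void foo_get(struct foo *f)
{
	/* was: WARN_ON(atomic_read(&f->refcnt) <= 0); */
	WARN_ON(refcount_read(&f->refcnt) == 0);
	refcount_inc(&f->refcnt);
}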



[PATCH 0/3] s390 refcount conversions

2017-10-20 Thread Elena Reshetova
This series, for S390, replaces atomic_t reference
counters with the new refcount_t type and API (see include/linux/refcount.h).
By doing this we prevent intentional or accidental
underflows or overflows that can lead to use-after-free vulnerabilities.

The patches are fully independent and can be cherry-picked separately.
Patches are based on top of linux-next as of yesterday.
If there are no objections to the patches, please merge them via respective 
trees.

Elena Reshetova (3):
  [S390] vmur: convert urdev.ref_count from atomic_t to refcount_t
  [S390] net: convert lcs_reply.refcnt from atomic_t to refcount_t
  [S390] qeth: convert qeth_reply.refcnt from atomic_t to refcount_t

 drivers/s390/char/vmur.c  |  8 
 drivers/s390/char/vmur.h  |  4 +++-
 drivers/s390/net/lcs.c| 10 +-
 drivers/s390/net/lcs.h|  3 ++-
 drivers/s390/net/qeth_core.h  |  3 ++-
 drivers/s390/net/qeth_core_main.c | 10 +-
 6 files changed, 21 insertions(+), 17 deletions(-)

-- 
2.7.4



[PATCH 2/3] [S390] net: convert lcs_reply.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable lcs_reply.refcnt is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/s390/net/lcs.c | 10 +-
 drivers/s390/net/lcs.h |  3 ++-
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index d01b5c2..d7344be 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -769,15 +769,15 @@ lcs_get_lancmd(struct lcs_card *card, int count)
 static void
 lcs_get_reply(struct lcs_reply *reply)
 {
-   WARN_ON(atomic_read(&reply->refcnt) <= 0);
-   atomic_inc(&reply->refcnt);
+   WARN_ON(refcount_read(&reply->refcnt) == 0);
+   refcount_inc(&reply->refcnt);
 }
 
 static void
 lcs_put_reply(struct lcs_reply *reply)
 {
-WARN_ON(atomic_read(&reply->refcnt) <= 0);
-if (atomic_dec_and_test(&reply->refcnt)) {
+   WARN_ON(refcount_read(&reply->refcnt) == 0);
+   if (refcount_dec_and_test(&reply->refcnt)) {
kfree(reply);
}
 
@@ -793,7 +793,7 @@ lcs_alloc_reply(struct lcs_cmd *cmd)
reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
if (!reply)
return NULL;
-   atomic_set(&reply->refcnt,1);
+   refcount_set(&reply->refcnt,1);
reply->sequence_no = cmd->sequence_no;
reply->received = 0;
reply->rc = 0;
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 150fcb4..3802f4f 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -4,6 +4,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #define LCS_DBF_TEXT(level, name, text) \
@@ -270,7 +271,7 @@ struct lcs_buffer {
 struct lcs_reply {
struct list_head list;
__u16 sequence_no;
-   atomic_t refcnt;
+   refcount_t refcnt;
/* Callback for completion notification. */
void (*callback)(struct lcs_card *, struct lcs_cmd *);
wait_queue_head_t wait_q;
-- 
2.7.4



[PATCH] sparc64: convert mdesc_handle.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable mdesc_handle.refcnt is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 arch/sparc/kernel/mdesc.c | 17 +
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index fa466ce..821a724 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -12,6 +12,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -70,7 +71,7 @@ struct mdesc_handle {
struct list_headlist;
struct mdesc_mem_ops*mops;
void*self_base;
-   atomic_trefcnt;
+   refcount_t  refcnt;
unsigned inthandle_size;
struct mdesc_hdrmdesc;
 };
@@ -152,7 +153,7 @@ static void mdesc_handle_init(struct mdesc_handle *hp,
memset(hp, 0, handle_size);
INIT_LIST_HEAD(&hp->list);
hp->self_base = base;
-   atomic_set(&hp->refcnt, 1);
+   refcount_set(&hp->refcnt, 1);
hp->handle_size = handle_size;
 }
 
@@ -182,7 +183,7 @@ static void __init mdesc_memblock_free(struct mdesc_handle 
*hp)
unsigned int alloc_size;
unsigned long start;
 
-   BUG_ON(atomic_read(&hp->refcnt) != 0);
+   BUG_ON(refcount_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
 
alloc_size = PAGE_ALIGN(hp->handle_size);
@@ -220,7 +221,7 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int 
mdesc_size)
 
 static void mdesc_kfree(struct mdesc_handle *hp)
 {
-   BUG_ON(atomic_read(&hp->refcnt) != 0);
+   BUG_ON(refcount_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
 
kfree(hp->self_base);
@@ -259,7 +260,7 @@ struct mdesc_handle *mdesc_grab(void)
spin_lock_irqsave(&mdesc_lock, flags);
hp = cur_mdesc;
if (hp)
-   atomic_inc(&hp->refcnt);
+   refcount_inc(&hp->refcnt);
spin_unlock_irqrestore(&mdesc_lock, flags);
 
return hp;
@@ -271,7 +272,7 @@ void mdesc_release(struct mdesc_handle *hp)
unsigned long flags;
 
spin_lock_irqsave(&mdesc_lock, flags);
-   if (atomic_dec_and_test(&hp->refcnt)) {
+   if (refcount_dec_and_test(&hp->refcnt)) {
list_del_init(&hp->list);
hp->mops->free(hp);
}
@@ -513,7 +514,7 @@ void mdesc_update(void)
if (status != HV_EOK || real_len > len) {
printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
   status);
-   atomic_dec(&hp->refcnt);
+   refcount_dec(&hp->refcnt);
mdesc_free(hp);
goto out;
}
@@ -526,7 +527,7 @@ void mdesc_update(void)
mdesc_notify_clients(orig_hp, hp);
 
spin_lock_irqsave(&mdesc_lock, flags);
-   if (atomic_dec_and_test(&orig_hp->refcnt))
+   if (refcount_dec_and_test(&orig_hp->refcnt))
mdesc_free(orig_hp);
else
list_add(&orig_hp->list, &mdesc_zombie_list);
-- 
2.7.4
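
The mdesc code is the "refcounted current snapshot" pattern: a global pointer to the most recent machine description, grabbed under a spinlock with an increment and released with dec_and_test deciding when an old snapshot can finally be freed (the real code additionally parks still-referenced old snapshots on a zombie list, omitted here). A reduced sketch with hypothetical names:

#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct snapshot {
	refcount_t refcnt;
	/* ... descriptive data ... */
};

static struct snapshot *cur_snapshot;
static DEFINE_SPINLOCK(snapshot_lock);

static struct snapshot *snapshot_grab(void)
{
	struct snapshot *s;
	unsigned long flags;

	spin_lock_irqsave(&snapshot_lock, flags);
	s = cur_snapshot;
	if (s)
		refcount_inc(&s->refcnt);
	spin_unlock_irqrestore(&snapshot_lock, flags);

	return s;
}

static void snapshot_release(struct snapshot *s)
{
	unsigned long flags;

	spin_lock_irqsave(&snapshot_lock, flags);
	if (refcount_dec_and_test(&s->refcnt))
		kfree(s);
	spin_unlock_irqrestore(&snapshot_lock, flags);
}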



[PATCH 0/6] v4 block refcount conversion patches

2017-10-20 Thread Elena Reshetova
Changes in v4:
 - Improved commit messages and signoff info.
 - Rebase on top of linux-next as of yesterday.
 - WARN_ONs are restored since x86 refcount_t does not WARN on zero

Changes in v3:
No changes in the patches apart from trivial rebases, but refcount_t
now defaults to atomic_t and uses the standard atomic operations
unless CONFIG_REFCOUNT_FULL is enabled. This is a compromise for
performance-critical systems that cannot accept even a slight delay
on the refcounter operations.

Changes in v2:
WARNs that are no longer needed are removed, since refcount_t warns by itself.
BUG_ONs are left as they are, since refcount_t doesn't BUG by default.

This series, for block subsystem, replaces atomic_t reference
counters with the new refcount_t type and API (see include/linux/refcount.h).
By doing this we prevent intentional or accidental
underflows or overflows that can lead to use-after-free vulnerabilities.

The patches are fully independent and can be cherry-picked separately.
If there are no objections to the patches, please merge them via respective 
trees.

Elena Reshetova (6):
  block: convert bio.__bi_cnt from atomic_t to refcount_t
  block: convert blk_queue_tag.refcnt from atomic_t to refcount_t
  block: convert blkcg_gq.refcnt from atomic_t to refcount_t
  block: convert io_context.active_ref from atomic_t to refcount_t
  block: convert bsg_device.ref_count from atomic_t to refcount_t
  drivers, block: convert xen_blkif.refcnt from atomic_t to refcount_t

 block/bfq-iosched.c|  2 +-
 block/bio.c|  6 +++---
 block/blk-cgroup.c |  2 +-
 block/blk-ioc.c|  4 ++--
 block/blk-tag.c|  8 
 block/bsg.c|  9 +
 block/cfq-iosched.c|  4 ++--
 drivers/block/xen-blkback/common.h |  7 ---
 drivers/block/xen-blkback/xenbus.c |  2 +-
 fs/btrfs/volumes.c |  2 +-
 include/linux/bio.h|  4 ++--
 include/linux/blk-cgroup.h | 11 ++-
 include/linux/blk_types.h  |  3 ++-
 include/linux/blkdev.h |  3 ++-
 include/linux/iocontext.h  |  7 ---
 15 files changed, 40 insertions(+), 34 deletions(-)

-- 
2.7.4



[PATCH 2/6] block: convert blk_queue_tag.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable blk_queue_tag.refcnt is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 block/blk-tag.c| 8 
 include/linux/blkdev.h | 3 ++-
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/block/blk-tag.c b/block/blk-tag.c
index e1a9c15..a7263e3 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -35,7 +35,7 @@ EXPORT_SYMBOL(blk_queue_find_tag);
  */
 void blk_free_tags(struct blk_queue_tag *bqt)
 {
-   if (atomic_dec_and_test(&bqt->refcnt)) {
+   if (refcount_dec_and_test(&bqt->refcnt)) {
BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
bqt->max_depth);
 
@@ -130,7 +130,7 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct 
request_queue *q,
if (init_tag_map(q, tags, depth))
goto fail;
 
-   atomic_set(&tags->refcnt, 1);
+   refcount_set(&tags->refcnt, 1);
tags->alloc_policy = alloc_policy;
tags->next_tag = 0;
return tags;
@@ -180,7 +180,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
queue_flag_set(QUEUE_FLAG_QUEUED, q);
return 0;
} else
-   atomic_inc(&tags->refcnt);
+   refcount_inc(&tags->refcnt);
 
/*
 * assign it, all done
@@ -225,7 +225,7 @@ int blk_queue_resize_tags(struct request_queue *q, int 
new_depth)
 * Currently cannot replace a shared tag map with a new
 * one, so error out if this is the case
 */
-   if (atomic_read(&bqt->refcnt) != 1)
+   if (refcount_read(&bqt->refcnt) != 1)
return -EBUSY;
 
/*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 02fa42d..1fefdbb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -26,6 +26,7 @@
 #include 
 #include 
 #include 
+#include 
 
 struct module;
 struct scsi_ioctl_command;
@@ -295,7 +296,7 @@ struct blk_queue_tag {
unsigned long *tag_map; /* bit map of free/busy tags */
int max_depth;  /* what we will send to device */
int real_max_depth; /* what the array can hold */
-   atomic_t refcnt;/* map can be shared */
+   refcount_t refcnt;  /* map can be shared */
int alloc_policy;   /* tag allocation policy */
int next_tag;   /* next tag */
 };
-- 
2.7.4
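
blk_queue_resize_tags() keeps its "is the map shared" test as a plain read: a count of exactly 1 means this queue is the sole owner and may resize the map in place, anything else is refused. As with any refcount_read() based decision, the caller must hold whatever excludes new sharers while the check runs. A small sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/refcount.h>

struct tag_map {
	refcount_t refcnt;
	int depth;
};

/* Sketch: only the sole owner may mutate the shared structure. */
static int tag_map_resize(struct tag_map *map, int new_depth)
{
	if (refcount_read(&map->refcnt) != 1)
		return -EBUSY;

	map->depth = new_depth;
	return 0;
}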



[PATCH 5/6] block: convert bsg_device.ref_count from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable bsg_device.ref_count is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 block/bsg.c | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/block/bsg.c b/block/bsg.c
index ee1335c..6c98422 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -21,6 +21,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -38,7 +39,7 @@ struct bsg_device {
struct list_head busy_list;
struct list_head done_list;
struct hlist_node dev_list;
-   atomic_t ref_count;
+   refcount_t ref_count;
int queued_cmds;
int done_cmds;
wait_queue_head_t wq_done;
@@ -710,7 +711,7 @@ static int bsg_put_device(struct bsg_device *bd)
 
mutex_lock(&bsg_mutex);
 
-   do_free = atomic_dec_and_test(&bd->ref_count);
+   do_free = refcount_dec_and_test(&bd->ref_count);
if (!do_free) {
mutex_unlock(&bsg_mutex);
goto out;
@@ -768,7 +769,7 @@ static struct bsg_device *bsg_add_device(struct inode 
*inode,
 
bsg_set_block(bd, file);
 
-   atomic_set(&bd->ref_count, 1);
+   refcount_set(&bd->ref_count, 1);
mutex_lock(&bsg_mutex);
hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
 
@@ -788,7 +789,7 @@ static struct bsg_device *__bsg_get_device(int minor, 
struct request_queue *q)
 
hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
if (bd->queue == q) {
-   atomic_inc(&bd->ref_count);
+   refcount_inc(&bd->ref_count);
goto found;
}
}
-- 
2.7.4



[PATCH 4/6] block: convert io_context.active_ref from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable io_context.active_ref is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 block/bfq-iosched.c   | 2 +-
 block/blk-ioc.c   | 4 ++--
 block/cfq-iosched.c   | 4 ++--
 include/linux/iocontext.h | 7 ---
 4 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index a4783da..1ec9b22 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4030,7 +4030,7 @@ static void bfq_update_has_short_ttime(struct bfq_data 
*bfqd,
 * bfqq. Otherwise check average think time to
 * decide whether to mark as has_short_ttime
 */
-   if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
+   if (refcount_read(&bic->icq.ioc->active_ref) == 0 ||
(bfq_sample_valid(bfqq->ttime.ttime_samples) &&
 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
has_short_ttime = false;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 63898d2..69704d2 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -176,7 +176,7 @@ void put_io_context_active(struct io_context *ioc)
unsigned long flags;
struct io_cq *icq;
 
-   if (!atomic_dec_and_test(&ioc->active_ref)) {
+   if (!refcount_dec_and_test(&ioc->active_ref)) {
put_io_context(ioc);
return;
}
@@ -275,7 +275,7 @@ int create_task_io_context(struct task_struct *task, gfp_t 
gfp_flags, int node)
/* initialize */
atomic_long_set(&ioc->refcount, 1);
atomic_set(&ioc->nr_tasks, 1);
-   atomic_set(&ioc->active_ref, 1);
+   refcount_set(&ioc->active_ref, 1);
spin_lock_init(&ioc->lock);
INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
INIT_HLIST_HEAD(&ioc->icq_list);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9f342ef..e6d5d6d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2941,7 +2941,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 * task has exited, don't wait
 */
cic = cfqd->active_cic;
-   if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
+   if (!cic || !refcount_read(&cic->icq.ioc->active_ref))
return;
 
/*
@@ -3933,7 +3933,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct 
cfq_queue *cfqq,
 
if (cfqq->next_rq && req_noidle(cfqq->next_rq))
enable_idle = 0;
-   else if (!atomic_read(&cic->icq.ioc->active_ref) ||
+   else if (!refcount_read(&cic->icq.ioc->active_ref) ||
 !cfqd->cfq_slice_idle ||
 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
enable_idle = 0;
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index df38db2..a1e28c3 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -3,6 +3,7 @@
 
 #include 
 #include 
+#include 
 #include 
 
 enum {
@@ -96,7 +97,7 @@ struct io_cq {
  */
 struct io_context {
atomic_long_t refcount;
-   atomic_t active_ref;
+   refcount_t active_ref;
atomic_t nr_tasks;
 
/* all the fields below are protected by this lock */
@@ -128,9 +129,9 @@ struct io_context {
 static inline void get_io_context_active(struct io_context *ioc)
 {
WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
-   WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
+   WARN_ON_ONCE(refcount_read(&ioc->active_ref) == 0);
atomic_long_inc(&ioc->refcount);
-   atomic_inc(&ioc->active_ref);
+   refcount_inc(&ioc->active_ref);
 }
 
 static inline void ioc_task_link(struct io_context *ioc)
-- 
2.7.4
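
io_context is the unusual case of two counters on one object: refcount (an atomic_long_t, untouched here) pins the memory, while active_ref counts tasks actively issuing I/O and is what this patch converts. Very roughly, and with a hypothetical reduced structure rather than the real io_context:

#include <linux/refcount.h>
#include <linux/slab.h>

struct ctx {
	refcount_t users;	/* pins the memory              */
	refcount_t active;	/* tasks actively using the ctx */
};

static void ctx_get_active(struct ctx *c)
{
	refcount_inc(&c->users);
	refcount_inc(&c->active);
}

static void ctx_put_active(struct ctx *c)
{
	if (refcount_dec_and_test(&c->active)) {
		/* last active user: quiesce per-queue state here */
	}
	if (refcount_dec_and_test(&c->users))
		kfree(c);
}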



[PATCH 6/6] drivers, block: convert xen_blkif.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable xen_blkif.refcnt is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 drivers/block/xen-blkback/common.h | 7 ---
 drivers/block/xen-blkback/xenbus.c | 2 +-
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index ecb35fe..0c3320d 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -35,6 +35,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -319,7 +320,7 @@ struct xen_blkif {
struct xen_vbd  vbd;
/* Back pointer to the backend_info. */
struct backend_info *be;
-   atomic_trefcnt;
+   refcount_t  refcnt;
/* for barrier (drain) requests */
struct completion   drain_complete;
atomic_tdrain;
@@ -372,10 +373,10 @@ struct pending_req {
 (_v)->bdev->bd_part->nr_sects : \
  get_capacity((_v)->bdev->bd_disk))
 
-#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
+#define xen_blkif_get(_b) (refcount_inc(&(_b)->refcnt))
 #define xen_blkif_put(_b)  \
do {\
-   if (atomic_dec_and_test(&(_b)->refcnt)) \
+   if (refcount_dec_and_test(&(_b)->refcnt))   \
schedule_work(&(_b)->free_work);\
} while (0)
 
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 21c1be1..5955b61 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -176,7 +176,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
return ERR_PTR(-ENOMEM);
 
blkif->domid = domid;
-   atomic_set(&blkif->refcnt, 1);
+   refcount_set(&blkif->refcnt, 1);
init_completion(&blkif->drain_complete);
INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
 
-- 
2.7.4



[PATCH 3/6] block: convert blkcg_gq.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable blkcg_gq.refcnt is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 block/blk-cgroup.c |  2 +-
 include/linux/blk-cgroup.h | 11 ++-
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d3f56ba..1e7cedc 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -107,7 +107,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, 
struct request_queue *q,
blkg->q = q;
INIT_LIST_HEAD(&blkg->q_node);
blkg->blkcg = blkcg;
-   atomic_set(&blkg->refcnt, 1);
+   refcount_set(&blkg->refcnt, 1);
 
/* root blkg uses @q->root_rl, init rl only for !root blkgs */
if (blkcg != &blkcg_root) {
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 9d92153..c95d29d 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -19,6 +19,7 @@
 #include 
 #include 
 #include 
+#include 
 
 /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
 #define BLKG_STAT_CPU_BATCH(INT_MAX / 2)
@@ -122,7 +123,7 @@ struct blkcg_gq {
struct request_list rl;
 
/* reference count */
-   atomic_trefcnt;
+   refcount_t  refcnt;
 
/* is this blkg online? protected by both blkcg and q locks */
boolonline;
@@ -354,8 +355,8 @@ static inline int blkg_path(struct blkcg_gq *blkg, char 
*buf, int buflen)
  */
 static inline void blkg_get(struct blkcg_gq *blkg)
 {
-   WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
-   atomic_inc(&blkg->refcnt);
+   WARN_ON_ONCE(refcount_read(&blkg->refcnt) == 0);
+   refcount_inc(&blkg->refcnt);
 }
 
 void __blkg_release_rcu(struct rcu_head *rcu);
@@ -366,8 +367,8 @@ void __blkg_release_rcu(struct rcu_head *rcu);
  */
 static inline void blkg_put(struct blkcg_gq *blkg)
 {
-   WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
-   if (atomic_dec_and_test(&blkg->refcnt))
+   WARN_ON_ONCE(refcount_read(&blkg->refcnt) == 0);
+   if (refcount_dec_and_test(&blkg->refcnt))
call_rcu(&blkg->rcu_head, __blkg_release_rcu);
 }
 
-- 
2.7.4



[PATCH 1/6] block: convert bio.__bi_cnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable bio.__bi_cnt is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 block/bio.c   | 6 +++---
 fs/btrfs/volumes.c| 2 +-
 include/linux/bio.h   | 4 ++--
 include/linux/blk_types.h | 3 ++-
 4 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index 101c2a9..58edc1b 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -279,7 +279,7 @@ void bio_init(struct bio *bio, struct bio_vec *table,
 {
memset(bio, 0, sizeof(*bio));
atomic_set(&bio->__bi_remaining, 1);
-   atomic_set(&bio->__bi_cnt, 1);
+   refcount_set(&bio->__bi_cnt, 1);
 
bio->bi_io_vec = table;
bio->bi_max_vecs = max_vecs;
@@ -557,12 +557,12 @@ void bio_put(struct bio *bio)
if (!bio_flagged(bio, BIO_REFFED))
bio_free(bio);
else {
-   BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
+   BIO_BUG_ON(!refcount_read(&bio->__bi_cnt));
 
/*
 * last put frees it
 */
-   if (atomic_dec_and_test(&bio->__bi_cnt))
+   if (refcount_dec_and_test(&bio->__bi_cnt))
bio_free(bio);
}
 }
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b397375..11812ee 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -450,7 +450,7 @@ static noinline void run_scheduled_bios(struct btrfs_device 
*device)
waitqueue_active(&fs_info->async_submit_wait))
wake_up(&fs_info->async_submit_wait);
 
-   BUG_ON(atomic_read(&cur->__bi_cnt) == 0);
+   BUG_ON(refcount_read(&cur->__bi_cnt) == 0);
 
/*
 * if we're doing the sync list, record that our
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 275c91c..0fa4dd2 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -253,7 +253,7 @@ static inline void bio_get(struct bio *bio)
 {
bio->bi_flags |= (1 << BIO_REFFED);
smp_mb__before_atomic();
-   atomic_inc(&bio->__bi_cnt);
+   refcount_inc(&bio->__bi_cnt);
 }
 
 static inline void bio_cnt_set(struct bio *bio, unsigned int count)
@@ -262,7 +262,7 @@ static inline void bio_cnt_set(struct bio *bio, unsigned 
int count)
bio->bi_flags |= (1 << BIO_REFFED);
smp_mb__before_atomic();
}
-   atomic_set(&bio->__bi_cnt, count);
+   refcount_set(&bio->__bi_cnt, count);
 }
 
 static inline bool bio_flagged(struct bio *bio, unsigned int bit)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index a2d2aa7..1ec370e 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -7,6 +7,7 @@
 
 #include 
 #include 
+#include 
 
 struct bio_set;
 struct bio;
@@ -104,7 +105,7 @@ struct bio {
 
unsigned short  bi_max_vecs;/* max bvl_vecs we can hold */
 
-   atomic_t__bi_cnt;   /* pin count */
+   refcount_t  __bi_cnt;   /* pin count */
 
struct bio_vec  *bi_io_vec; /* the actual vec list */
 
-- 
2.7.4
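
The bio case is slightly special: most bios have a single owner, so bio_put() only consults the counter once BIO_REFFED has been set by bio_get() or bio_cnt_set(), and refcount_set() is also used to hand out several references up front. A reduced sketch of the "only count once shared" trick, with hypothetical names and without the flag/counter ordering barrier the real code uses:

#include <linux/refcount.h>
#include <linux/slab.h>

#define FOO_REFFED	0x1

struct foo {
	unsigned int flags;
	refcount_t cnt;
};

static void foo_free(struct foo *f)
{
	kfree(f);
}

static void foo_init(struct foo *f)
{
	f->flags = 0;
	refcount_set(&f->cnt, 1);
}

static void foo_get(struct foo *f)
{
	f->flags |= FOO_REFFED;
	refcount_inc(&f->cnt);
}

static void foo_put(struct foo *f)
{
	if (!(f->flags & FOO_REFFED))
		foo_free(f);			/* single-owner fast path */
	else if (refcount_dec_and_test(&f->cnt))
		foo_free(f);
}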



[PATCH 04/11] fs, nfs: convert nfs4_pnfs_ds.ds_count from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable nfs4_pnfs_ds.ds_count is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/nfs/pnfs.h |  3 ++-
 fs/nfs/pnfs_nfs.c | 10 +-
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 87f144f..cefa7da 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -30,6 +30,7 @@
 #ifndef FS_NFS_PNFS_H
 #define FS_NFS_PNFS_H
 
+#include 
 #include 
 #include 
 #include 
@@ -54,7 +55,7 @@ struct nfs4_pnfs_ds {
char*ds_remotestr;  /* comma sep list of addrs */
struct list_headds_addrs;
struct nfs_client   *ds_clp;
-   atomic_tds_count;
+   refcount_t  ds_count;
unsigned long   ds_state;
 #define NFS4DS_CONNECTING  0   /* ds is establishing connection */
 };
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 60da59b..03aaa60 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -338,7 +338,7 @@ print_ds(struct nfs4_pnfs_ds *ds)
"client %p\n"
"cl_exchange_flags %x\n",
ds->ds_remotestr,
-   atomic_read(&ds->ds_count), ds->ds_clp,
+   refcount_read(&ds->ds_count), ds->ds_clp,
ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
 }
 
@@ -451,7 +451,7 @@ static void destroy_ds(struct nfs4_pnfs_ds *ds)
 
 void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
 {
-   if (atomic_dec_and_lock(&ds->ds_count,
+   if (refcount_dec_and_lock(&ds->ds_count,
&nfs4_ds_cache_lock)) {
list_del_init(&ds->ds_node);
spin_unlock(&nfs4_ds_cache_lock);
@@ -537,7 +537,7 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
INIT_LIST_HEAD(&ds->ds_addrs);
list_splice_init(dsaddrs, &ds->ds_addrs);
ds->ds_remotestr = remotestr;
-   atomic_set(&ds->ds_count, 1);
+   refcount_set(&ds->ds_count, 1);
INIT_LIST_HEAD(&ds->ds_node);
ds->ds_clp = NULL;
list_add(&ds->ds_node, &nfs4_data_server_cache);
@@ -546,10 +546,10 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t 
gfp_flags)
} else {
kfree(remotestr);
kfree(ds);
-   atomic_inc(&tmp_ds->ds_count);
+   refcount_inc(&tmp_ds->ds_count);
dprintk("%s data server %s found, inc'ed ds_count to %d\n",
__func__, tmp_ds->ds_remotestr,
-   atomic_read(&tmp_ds->ds_count));
+   refcount_read(&tmp_ds->ds_count));
ds = tmp_ds;
}
spin_unlock(&nfs4_ds_cache_lock);
-- 
2.7.4
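
nfs4_pnfs_ds_put() is the textbook refcount_dec_and_lock() user: the final reference has to be dropped with the cache lock held so the object is unhooked from the global list atomically with the count reaching zero, while non-final puts stay lock-free. A generic sketch of that pattern with hypothetical names:

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	struct list_head node;
	refcount_t count;
};

static LIST_HEAD(foo_cache);
static DEFINE_SPINLOCK(foo_cache_lock);

static void foo_put(struct foo *f)
{
	/* takes foo_cache_lock only when this put drops the last reference */
	if (refcount_dec_and_lock(&f->count, &foo_cache_lock)) {
		list_del_init(&f->node);
		spin_unlock(&foo_cache_lock);
		kfree(f);
	}
}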



[PATCH 00/11] nfs refcount conversions

2017-10-20 Thread Elena Reshetova
This series, for nfs components, replaces atomic_t reference
counters with the new refcount_t type and API (see include/linux/refcount.h).
By doing this we prevent intentional or accidental
underflows or overflows that can lead to use-after-free vulnerabilities.

The patches are fully independent and can be cherry-picked separately.
If there are no objections to the patches, please merge them via respective 
trees.

Rebased on top of linux-next.

Elena Reshetova (11):
  fs, nfsd: convert nfs4_stid.sc_count from atomic_t to refcount_t
  fs, nfsd: convert nfs4_cntl_odstate.co_odcount from atomic_t to
refcount_t
  fs, nfsd: convert nfs4_file.fi_ref from atomic_t to refcount_t
  fs, nfs: convert nfs4_pnfs_ds.ds_count from atomic_t to refcount_t
  fs, nfs: convert pnfs_layout_segment.pls_refcount from atomic_t to
refcount_t
  fs, nfs: convert pnfs_layout_hdr.plh_refcount from atomic_t to
refcount_t
  fs, nfs: convert nfs4_ff_layout_mirror.ref from atomic_t to refcount_t
  fs, nfs: convert nfs_cache_defer_req.count from atomic_t to refcount_t
  fs, nfs: convert nfs4_lock_state.ls_count from atomic_t to refcount_t
  fs, nfs: convert nfs_lock_context.count from atomic_t to refcount_t
  fs, nfs: convert nfs_client.cl_count from atomic_t to refcount_t

 fs/nfs/cache_lib.c |  6 +++---
 fs/nfs/cache_lib.h |  2 +-
 fs/nfs/client.c| 10 +-
 fs/nfs/filelayout/filelayout.c | 12 ++--
 fs/nfs/flexfilelayout/flexfilelayout.c | 20 +--
 fs/nfs/flexfilelayout/flexfilelayout.h |  3 ++-
 fs/nfs/inode.c | 12 ++--
 fs/nfs/nfs4_fs.h   |  2 +-
 fs/nfs/nfs4client.c| 10 +-
 fs/nfs/nfs4proc.c  | 18 -
 fs/nfs/nfs4state.c | 14 ++---
 fs/nfs/pnfs.c  | 24 +++
 fs/nfs/pnfs.h  |  9 +
 fs/nfs/pnfs_nfs.c  | 10 +-
 fs/nfsd/nfs4layouts.c  |  4 ++--
 fs/nfsd/nfs4state.c| 36 +-
 fs/nfsd/state.h|  9 +
 include/linux/nfs_fs.h |  3 ++-
 include/linux/nfs_fs_sb.h  |  3 ++-
 19 files changed, 106 insertions(+), 101 deletions(-)

-- 
2.7.4



[PATCH 05/11] fs, nfs: convert pnfs_layout_segment.pls_refcount from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
refcount_t type and corresponding API should be
used instead of atomic_t when the variable is used as
a reference counter. This allows us to avoid accidental
refcounter overflows that might lead to use-after-free
situations.

Signed-off-by: Elena Reshetova 
Signed-off-by: Hans Liljestrand 
Signed-off-by: Kees Cook 
Signed-off-by: David Windsor 
---
 fs/nfs/pnfs.c | 12 ++--
 fs/nfs/pnfs.h |  4 ++--
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 3bcd669..499bb71 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -450,7 +450,7 @@ pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct 
pnfs_layout_segment *lseg,
 {
INIT_LIST_HEAD(&lseg->pls_list);
INIT_LIST_HEAD(&lseg->pls_lc_list);
-   atomic_set(&lseg->pls_refcount, 1);
+   refcount_set(&lseg->pls_refcount, 1);
set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
lseg->pls_layout = lo;
lseg->pls_range = *range;
@@ -507,13 +507,13 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
return;
 
dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
-   atomic_read(&lseg->pls_refcount),
+   refcount_read(&lseg->pls_refcount),
test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
 
lo = lseg->pls_layout;
inode = lo->plh_inode;
 
-   if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
+   if (refcount_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
spin_unlock(&inode->i_lock);
return;
@@ -551,7 +551,7 @@ pnfs_lseg_range_contained(const struct pnfs_layout_range 
*l1,
 static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
struct list_head *tmp_list)
 {
-   if (!atomic_dec_and_test(&lseg->pls_refcount))
+   if (!refcount_dec_and_test(&lseg->pls_refcount))
return false;
pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
list_add(&lseg->pls_list, tmp_list);
@@ -570,7 +570,7 @@ static int mark_lseg_invalid(struct pnfs_layout_segment 
*lseg,
 * outstanding io is finished.
 */
dprintk("%s: lseg %p ref %d\n", __func__, lseg,
-   atomic_read(&lseg->pls_refcount));
+   refcount_read(&lseg->pls_refcount));
if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
rv = 1;
}
@@ -1546,7 +1546,7 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
}
 
dprintk("%s:Return lseg %p ref %d\n",
-   __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
+   __func__, ret, ret ? refcount_read(&ret->pls_refcount) : 0);
return ret;
 }
 
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index cefa7da..f0e98e1 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -64,7 +64,7 @@ struct pnfs_layout_segment {
struct list_head pls_list;
struct list_head pls_lc_list;
struct pnfs_layout_range pls_range;
-   atomic_t pls_refcount;
+   refcount_t pls_refcount;
u32 pls_seq;
unsigned long pls_flags;
struct pnfs_layout_hdr *pls_layout;
@@ -394,7 +394,7 @@ static inline struct pnfs_layout_segment *
 pnfs_get_lseg(struct pnfs_layout_segment *lseg)
 {
if (lseg) {
-   atomic_inc(&lseg->pls_refcount);
+   refcount_inc(&lseg->pls_refcount);
smp_mb__after_atomic();
}
return lseg;
-- 
2.7.4



[PATCH 08/11] fs, nfs: convert nfs_cache_defer_req.count from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable nfs_cache_defer_req.count is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.
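
To make the protection described above concrete: with refcount_t the
misuse cases turn into warnings instead of silent wrap-arounds. The
snippet below is purely illustrative (see include/linux/refcount.h for
the real implementation) and is not part of this patch:

#include <linux/refcount.h>
#include <linux/types.h>

static void refcount_behaviour_demo(void)
{
        refcount_t r;
        bool last;

        refcount_set(&r, 1);            /* object created with one reference */
        refcount_inc(&r);               /* legitimate extra reference: 1 -> 2 */

        last = refcount_dec_and_test(&r);       /* 2 -> 1, last == false */
        last = refcount_dec_and_test(&r);       /* 1 -> 0, last == true: free the object */

        if (last)
                refcount_inc(&r);       /* incrementing a counter that already hit 0 is
                                         * the use-after-free pattern: refcount_inc()
                                         * WARNs here instead of silently going 0 -> 1.
                                         * Overflow is handled the same way: the counter
                                         * saturates with a WARN rather than wrapping. */
}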

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/nfs/cache_lib.c | 6 +++---
 fs/nfs/cache_lib.h | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c
index 2ae676f..6167f13 100644
--- a/fs/nfs/cache_lib.c
+++ b/fs/nfs/cache_lib.c
@@ -66,7 +66,7 @@ int nfs_cache_upcall(struct cache_detail *cd, char 
*entry_name)
  */
 void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq)
 {
-   if (atomic_dec_and_test(&dreq->count))
+   if (refcount_dec_and_test(&dreq->count))
kfree(dreq);
 }
 
@@ -86,7 +86,7 @@ static struct cache_deferred_req *nfs_dns_cache_defer(struct 
cache_req *req)
 
dreq = container_of(req, struct nfs_cache_defer_req, req);
dreq->deferred_req.revisit = nfs_dns_cache_revisit;
-   atomic_inc(&dreq->count);
+   refcount_inc(&dreq->count);
 
return &dreq->deferred_req;
 }
@@ -98,7 +98,7 @@ struct nfs_cache_defer_req *nfs_cache_defer_req_alloc(void)
dreq = kzalloc(sizeof(*dreq), GFP_KERNEL);
if (dreq) {
init_completion(&dreq->completion);
-   atomic_set(&dreq->count, 1);
+   refcount_set(&dreq->count, 1);
dreq->req.defer = nfs_dns_cache_defer;
}
return dreq;
diff --git a/fs/nfs/cache_lib.h b/fs/nfs/cache_lib.h
index 4116d2c..02b378c 100644
--- a/fs/nfs/cache_lib.h
+++ b/fs/nfs/cache_lib.h
@@ -15,7 +15,7 @@ struct nfs_cache_defer_req {
struct cache_req req;
struct cache_deferred_req deferred_req;
struct completion completion;
-   atomic_t count;
+   refcount_t count;
 };
 
 extern int nfs_cache_upcall(struct cache_detail *cd, char *entry_name);
-- 
2.7.4



[PATCH 11/11] fs, nfs: convert nfs_client.cl_count from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable nfs_client.cl_count is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/nfs/client.c| 10 +-
 fs/nfs/filelayout/filelayout.c | 12 ++--
 fs/nfs/flexfilelayout/flexfilelayout.c | 12 ++--
 fs/nfs/nfs4client.c| 10 +-
 fs/nfs/nfs4proc.c  | 12 ++--
 fs/nfs/nfs4state.c |  6 +++---
 include/linux/nfs_fs_sb.h  |  3 ++-
 7 files changed, 33 insertions(+), 32 deletions(-)

diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 22880ef..0ac2fb1 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -163,7 +163,7 @@ struct nfs_client *nfs_alloc_client(const struct 
nfs_client_initdata *cl_init)
 
clp->rpc_ops = clp->cl_nfs_mod->rpc_ops;
 
-   atomic_set(&clp->cl_count, 1);
+   refcount_set(&clp->cl_count, 1);
clp->cl_cons_state = NFS_CS_INITING;
 
memcpy(&clp->cl_addr, cl_init->addr, cl_init->addrlen);
@@ -269,7 +269,7 @@ void nfs_put_client(struct nfs_client *clp)
 
nn = net_generic(clp->cl_net, nfs_net_id);
 
-   if (atomic_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) {
+   if (refcount_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) {
list_del(&clp->cl_share_link);
nfs_cb_idr_remove_locked(clp);
spin_unlock(&nn->nfs_client_lock);
@@ -314,7 +314,7 @@ static struct nfs_client *nfs_match_client(const struct 
nfs_client_initdata *dat
   sap))
continue;
 
-   atomic_inc(&clp->cl_count);
+   refcount_inc(&clp->cl_count);
return clp;
}
return NULL;
@@ -1006,7 +1006,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server 
*source,
/* Copy data from the source */
server->nfs_client = source->nfs_client;
server->destroy = source->destroy;
-   atomic_inc(&server->nfs_client->cl_count);
+   refcount_inc(&server->nfs_client->cl_count);
nfs_server_copy_userdata(server, source);
 
server->fsid = fattr->fsid;
@@ -1166,7 +1166,7 @@ static int nfs_server_list_show(struct seq_file *m, void 
*v)
   clp->rpc_ops->version,
   rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR),
   rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT),
-  atomic_read(&clp->cl_count),
+  refcount_read(&clp->cl_count),
   clp->cl_hostname);
rcu_read_unlock();
 
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 508126e..4e54d8b 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -471,10 +471,10 @@ filelayout_read_pagelist(struct nfs_pgio_header *hdr)
return PNFS_NOT_ATTEMPTED;
 
dprintk("%s USE DS: %s cl_count %d\n", __func__,
-   ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));
+   ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count));
 
/* No multipath support. Use first DS */
-   atomic_inc(&ds->ds_clp->cl_count);
+   refcount_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
hdr->ds_commit_idx = idx;
fh = nfs4_fl_select_ds_fh(lseg, j);
@@ -515,10 +515,10 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, 
int sync)
 
dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d\n",
__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
-   offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));
+   offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count));
 
hdr->pgio_done_cb = filelayout_write_done_cb;
-   atomic_inc(&ds->ds_clp->cl_count);
+   refcount_inc(&ds->ds_clp->cl_count);
 

[PATCH 10/11] fs, nfs: convert nfs_lock_context.count from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable nfs_lock_context.count is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/nfs/inode.c | 12 ++--
 include/linux/nfs_fs.h |  3 ++-
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 134d9f5..52a60e3 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -783,7 +783,7 @@ EXPORT_SYMBOL_GPL(nfs_getattr);
 
 static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
 {
-   atomic_set(&l_ctx->count, 1);
+   refcount_set(&l_ctx->count, 1);
l_ctx->lockowner = current->files;
INIT_LIST_HEAD(&l_ctx->list);
atomic_set(&l_ctx->io_count, 0);
@@ -797,7 +797,7 @@ static struct nfs_lock_context 
*__nfs_find_lock_context(struct nfs_open_context
do {
if (pos->lockowner != current->files)
continue;
-   atomic_inc(&pos->count);
+   refcount_inc(&pos->count);
return pos;
} while ((pos = list_entry(pos->list.next, typeof(*pos), list)) != 
head);
return NULL;
@@ -836,7 +836,7 @@ void nfs_put_lock_context(struct nfs_lock_context *l_ctx)
struct nfs_open_context *ctx = l_ctx->open_context;
struct inode *inode = d_inode(ctx->dentry);
 
-   if (!atomic_dec_and_lock(&l_ctx->count, &inode->i_lock))
+   if (!refcount_dec_and_lock(&l_ctx->count, &inode->i_lock))
return;
list_del(&l_ctx->list);
spin_unlock(&inode->i_lock);
@@ -913,7 +913,7 @@ EXPORT_SYMBOL_GPL(alloc_nfs_open_context);
 struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
 {
if (ctx != NULL)
-   atomic_inc(&ctx->lock_context.count);
+   refcount_inc(&ctx->lock_context.count);
return ctx;
 }
 EXPORT_SYMBOL_GPL(get_nfs_open_context);
@@ -924,11 +924,11 @@ static void __put_nfs_open_context(struct 
nfs_open_context *ctx, int is_sync)
struct super_block *sb = ctx->dentry->d_sb;
 
if (!list_empty(&ctx->list)) {
-   if (!atomic_dec_and_lock(&ctx->lock_context.count, 
&inode->i_lock))
+   if (!refcount_dec_and_lock(&ctx->lock_context.count, 
&inode->i_lock))
return;
list_del(&ctx->list);
spin_unlock(&inode->i_lock);
-   } else if (!atomic_dec_and_test(&ctx->lock_context.count))
+   } else if (!refcount_dec_and_test(&ctx->lock_context.count))
return;
if (inode != NULL)
NFS_PROTO(inode)->close_context(ctx, is_sync);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index a0282ce..51e9124 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -22,6 +22,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -55,7 +56,7 @@ struct nfs_access_entry {
 };
 
 struct nfs_lock_context {
-   atomic_t count;
+   refcount_t count;
struct list_head list;
struct nfs_open_context *open_context;
fl_owner_t lockowner;
-- 
2.7.4



[PATCH 09/11] fs, nfs: convert nfs4_lock_state.ls_count from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable nfs4_lock_state.ls_count is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/nfs/nfs4_fs.h   | 2 +-
 fs/nfs/nfs4proc.c  | 6 +++---
 fs/nfs/nfs4state.c | 8 
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index ac4f10b..29409a8 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -144,7 +144,7 @@ struct nfs4_lock_state {
unsigned long   ls_flags;
struct nfs_seqid_counterls_seqid;
nfs4_stateidls_stateid;
-   atomic_tls_count;
+   refcount_t  ls_count;
fl_owner_t  ls_owner;
 };
 
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f90090e..57c38ea 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2518,7 +2518,7 @@ static int nfs41_check_expired_locks(struct nfs4_state 
*state)
if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
 
-   atomic_inc(&lsp->ls_count);
+   refcount_inc(&lsp->ls_count);
spin_unlock(&state->state_lock);
 
nfs4_put_lock_state(prev);
@@ -5896,7 +5896,7 @@ static struct nfs4_unlockdata 
*nfs4_alloc_unlockdata(struct file_lock *fl,
p->arg.seqid = seqid;
p->res.seqid = seqid;
p->lsp = lsp;
-   atomic_inc(&lsp->ls_count);
+   refcount_inc(&lsp->ls_count);
/* Ensure we don't close file until we're done freeing locks! */
p->ctx = get_nfs_open_context(ctx);
p->l_ctx = nfs_get_lock_context(ctx);
@@ -6112,7 +6112,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct 
file_lock *fl,
p->res.lock_seqid = p->arg.lock_seqid;
p->lsp = lsp;
p->server = server;
-   atomic_inc(&lsp->ls_count);
+   refcount_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
memcpy(&p->fl, fl, sizeof(p->fl));
return p;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0378e225..1887134 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -825,7 +825,7 @@ __nfs4_find_lock_state(struct nfs4_state *state,
ret = pos;
}
if (ret)
-   atomic_inc(&ret->ls_count);
+   refcount_inc(&ret->ls_count);
return ret;
 }
 
@@ -843,7 +843,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct 
nfs4_state *state, f
if (lsp == NULL)
return NULL;
nfs4_init_seqid_counter(&lsp->ls_seqid);
-   atomic_set(&lsp->ls_count, 1);
+   refcount_set(&lsp->ls_count, 1);
lsp->ls_state = state;
lsp->ls_owner = fl_owner;
lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, 
GFP_NOFS);
@@ -907,7 +907,7 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
if (lsp == NULL)
return;
state = lsp->ls_state;
-   if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
+   if (!refcount_dec_and_lock(&lsp->ls_count, &state->state_lock))
return;
list_del(&lsp->ls_locks);
if (list_empty(&state->lock_states))
@@ -927,7 +927,7 @@ static void nfs4_fl_copy_lock(struct file_lock *dst, struct 
file_lock *src)
struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
 
dst->fl_u.nfs4_fl.owner = lsp;
-   atomic_inc(&lsp->ls_count);
+   refcount_inc(&lsp->ls_count);
 }
 
 static void nfs4_fl_release_lock(struct file_lock *fl)
-- 
2.7.4



[PATCH 07/11] fs, nfs: convert nfs4_ff_layout_mirror.ref from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable nfs4_ff_layout_mirror.ref is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.
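
The call site worth noting in this patch is ff_layout_add_mirror(),
which looks an existing mirror up under the inode lock and must not
take a reference on a mirror whose count has already dropped to zero;
the atomic_inc_not_zero() -> refcount_inc_not_zero() change below keeps
that behaviour. As a generic sketch of the idiom (made-up struct and
names, not the nfs code itself):

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

struct foo {
        refcount_t refs;
        int key;
        struct list_head node;
};

/* Return a referenced foo matching @key, or NULL if none is alive. */
static struct foo *foo_lookup(struct list_head *head, spinlock_t *lock, int key)
{
        struct foo *pos;

        spin_lock(lock);
        list_for_each_entry(pos, head, node) {
                if (pos->key != key)
                        continue;
                /*
                 * The entry may be in the middle of being torn down by
                 * the last put; only take a reference if the count is
                 * still non-zero.
                 */
                if (refcount_inc_not_zero(&pos->refs)) {
                        spin_unlock(lock);
                        return pos;
                }
        }
        spin_unlock(lock);
        return NULL;
}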

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/nfs/flexfilelayout/flexfilelayout.c | 8 
 fs/nfs/flexfilelayout/flexfilelayout.h | 3 ++-
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c 
b/fs/nfs/flexfilelayout/flexfilelayout.c
index b0fa83a..ff55a0a 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -187,7 +187,7 @@ ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
continue;
if (!ff_mirror_match_fh(mirror, pos))
continue;
-   if (atomic_inc_not_zero(&pos->ref)) {
+   if (refcount_inc_not_zero(&pos->ref)) {
spin_unlock(&inode->i_lock);
return pos;
}
@@ -218,7 +218,7 @@ static struct nfs4_ff_layout_mirror 
*ff_layout_alloc_mirror(gfp_t gfp_flags)
mirror = kzalloc(sizeof(*mirror), gfp_flags);
if (mirror != NULL) {
spin_lock_init(&mirror->lock);
-   atomic_set(&mirror->ref, 1);
+   refcount_set(&mirror->ref, 1);
INIT_LIST_HEAD(&mirror->mirrors);
}
return mirror;
@@ -242,7 +242,7 @@ static void ff_layout_free_mirror(struct 
nfs4_ff_layout_mirror *mirror)
 
 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
 {
-   if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
+   if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
ff_layout_free_mirror(mirror);
 }
 
@@ -2286,7 +2286,7 @@ ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, 
&mirror->flags))
continue;
/* mirror refcount put in cleanup_layoutstats */
-   if (!atomic_inc_not_zero(&mirror->ref))
+   if (!refcount_inc_not_zero(&mirror->ref))
continue;
dev = &mirror->mirror_ds->id_node; 
memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h 
b/fs/nfs/flexfilelayout/flexfilelayout.h
index 98b34c9..ca35426 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -13,6 +13,7 @@
 #define FF_FLAGS_NO_IO_THRU_MDS  2
 #define FF_FLAGS_NO_READ_IO  4
 
+#include 
 #include "../pnfs.h"
 
 /* XXX: Let's filter out insanely large mirror count for now to avoid oom
@@ -81,7 +82,7 @@ struct nfs4_ff_layout_mirror {
nfs4_stateidstateid;
struct rpc_cred __rcu   *ro_cred;
struct rpc_cred __rcu   *rw_cred;
-   atomic_tref;
+   refcount_t  ref;
spinlock_t  lock;
unsigned long   flags;
struct nfs4_ff_layoutstat   read_stat;
-- 
2.7.4



[PATCH 06/11] fs, nfs: convert pnfs_layout_hdr.plh_refcount from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable pnfs_layout_hdr.plh_refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/nfs/pnfs.c | 12 ++--
 fs/nfs/pnfs.h |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 499bb71..4aab53b 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -251,7 +251,7 @@ EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
 void
 pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
 {
-   atomic_inc(&lo->plh_refcount);
+   refcount_inc(&lo->plh_refcount);
 }
 
 static struct pnfs_layout_hdr *
@@ -296,7 +296,7 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
 
pnfs_layoutreturn_before_put_layout_hdr(lo);
 
-   if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
+   if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
if (!list_empty(&lo->plh_segs))
WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
pnfs_detach_layout_hdr(lo);
@@ -395,14 +395,14 @@ pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int 
fail_bit)
 {
lo->plh_retry_timestamp = jiffies;
if (!test_and_set_bit(fail_bit, &lo->plh_flags))
-   atomic_inc(&lo->plh_refcount);
+   refcount_inc(&lo->plh_refcount);
 }
 
 static void
 pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
 {
if (test_and_clear_bit(fail_bit, &lo->plh_flags))
-   atomic_dec(&lo->plh_refcount);
+   refcount_dec(&lo->plh_refcount);
 }
 
 static void
@@ -472,7 +472,7 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
list_del_init(&lseg->pls_list);
/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
-   atomic_dec(&lo->plh_refcount);
+   refcount_dec(&lo->plh_refcount);
if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
return;
if (list_empty(&lo->plh_segs) &&
@@ -1451,7 +1451,7 @@ alloc_init_layout_hdr(struct inode *ino,
lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
if (!lo)
return NULL;
-   atomic_set(&lo->plh_refcount, 1);
+   refcount_set(&lo->plh_refcount, 1);
INIT_LIST_HEAD(&lo->plh_layouts);
INIT_LIST_HEAD(&lo->plh_segs);
INIT_LIST_HEAD(&lo->plh_return_segs);
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index f0e98e1..78de7a2 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -180,7 +180,7 @@ struct pnfs_layoutdriver_type {
 };
 
 struct pnfs_layout_hdr {
-   atomic_tplh_refcount;
+   refcount_t  plh_refcount;
atomic_tplh_outstanding; /* number of RPCs out */
struct list_headplh_layouts;   /* other client layouts */
struct list_headplh_bulk_destroy;
-- 
2.7.4



[PATCH 03/11] fs, nfsd: convert nfs4_file.fi_ref from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable nfs4_file.fi_ref is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/nfsd/nfs4state.c | 6 +++---
 fs/nfsd/state.h | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index fb61d79..55a5f7d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -359,7 +359,7 @@ put_nfs4_file(struct nfs4_file *fi)
 {
might_lock(&state_lock);
 
-   if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
+   if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
hlist_del_rcu(&fi->fi_hash);
spin_unlock(&state_lock);
WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
@@ -3351,7 +3351,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned 
int hashval,
 {
lockdep_assert_held(&state_lock);
 
-   atomic_set(&fp->fi_ref, 1);
+   refcount_set(&fp->fi_ref, 1);
spin_lock_init(&fp->fi_lock);
INIT_LIST_HEAD(&fp->fi_stateids);
INIT_LIST_HEAD(&fp->fi_delegations);
@@ -3647,7 +3647,7 @@ find_file_locked(struct knfsd_fh *fh, unsigned int 
hashval)
 
hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
if (fh_match(&fp->fi_fhandle, fh)) {
-   if (atomic_inc_not_zero(&fp->fi_ref))
+   if (refcount_inc_not_zero(&fp->fi_ref))
return fp;
}
}
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 58eb5f4..4797429 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -482,7 +482,7 @@ struct nfs4_clnt_odstate {
  * the global state_lock spinlock.
  */
 struct nfs4_file {
-   atomic_tfi_ref;
+   refcount_t  fi_ref;
spinlock_t  fi_lock;
struct hlist_node   fi_hash;/* hash on fi_fhandle */
struct list_headfi_stateids;
@@ -635,7 +635,7 @@ struct nfs4_file *find_file(struct knfsd_fh *fh);
 void put_nfs4_file(struct nfs4_file *fi);
 static inline void get_nfs4_file(struct nfs4_file *fi)
 {
-   atomic_inc(&fi->fi_ref);
+   refcount_inc(&fi->fi_ref);
 }
 struct file *find_any_file(struct nfs4_file *f);
 
-- 
2.7.4



[PATCH 02/11] fs, nfsd: convert nfs4_clnt_odstate.co_odcount from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable nfs4_clnt_odstate.co_odcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/nfsd/nfs4state.c | 6 +++---
 fs/nfsd/state.h | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index d356b87..fb61d79 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -568,7 +568,7 @@ alloc_clnt_odstate(struct nfs4_client *clp)
co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
if (co) {
co->co_client = clp;
-   atomic_set(&co->co_odcount, 1);
+   refcount_set(&co->co_odcount, 1);
}
return co;
 }
@@ -586,7 +586,7 @@ static inline void
 get_clnt_odstate(struct nfs4_clnt_odstate *co)
 {
if (co)
-   atomic_inc(&co->co_odcount);
+   refcount_inc(&co->co_odcount);
 }
 
 static void
@@ -598,7 +598,7 @@ put_clnt_odstate(struct nfs4_clnt_odstate *co)
return;
 
fp = co->co_file;
-   if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
+   if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
list_del(&co->co_perfile);
spin_unlock(&fp->fi_lock);
 
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index f927aa4..58eb5f4 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -466,7 +466,7 @@ struct nfs4_clnt_odstate {
struct nfs4_client  *co_client;
struct nfs4_file*co_file;
struct list_headco_perfile;
-   atomic_tco_odcount;
+   refcount_t  co_odcount;
 };
 
 /*
-- 
2.7.4



[PATCH 01/11] fs, nfsd: convert nfs4_stid.sc_count from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable nfs4_stid.sc_count is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/nfsd/nfs4layouts.c |  4 ++--
 fs/nfsd/nfs4state.c   | 24 
 fs/nfsd/state.h   |  3 ++-
 3 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index e122da6..fed0760 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -335,7 +335,7 @@ nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
 
trace_layout_recall(&ls->ls_stid.sc_stateid);
 
-   atomic_inc(&ls->ls_stid.sc_count);
+   refcount_inc(&ls->ls_stid.sc_count);
nfsd4_run_cb(&ls->ls_recall);
 
 out_unlock:
@@ -440,7 +440,7 @@ nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct 
nfs4_layout_stateid *ls)
goto done;
}
 
-   atomic_inc(&ls->ls_stid.sc_count);
+   refcount_inc(&ls->ls_stid.sc_count);
list_add_tail(&new->lo_perstate, &ls->ls_layouts);
new = NULL;
 done:
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 0c04f81..d356b87 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -656,7 +656,7 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, 
struct kmem_cache *sla
stid->sc_stateid.si_opaque.so_id = new_id;
stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
/* Will be incremented before return to client: */
-   atomic_set(&stid->sc_count, 1);
+   refcount_set(&stid->sc_count, 1);
spin_lock_init(&stid->sc_lock);
 
/*
@@ -813,7 +813,7 @@ nfs4_put_stid(struct nfs4_stid *s)
 
might_lock(&clp->cl_lock);
 
-   if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
+   if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
wake_up_all(&close_wq);
return;
}
@@ -913,7 +913,7 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct 
nfs4_file *fp)
if (status)
return status;
++fp->fi_delegees;
-   atomic_inc(&dp->dl_stid.sc_count);
+   refcount_inc(&dp->dl_stid.sc_count);
dp->dl_stid.sc_type = NFS4_DELEG_STID;
list_add(&dp->dl_perfile, &fp->fi_delegations);
list_add(&dp->dl_perclnt, &clp->cl_delegations);
@@ -1214,7 +1214,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid 
*stp,
 
WARN_ON_ONCE(!list_empty(&stp->st_locks));
 
-   if (!atomic_dec_and_test(&s->sc_count)) {
+   if (!refcount_dec_and_test(&s->sc_count)) {
wake_up_all(&close_wq);
return;
}
@@ -2072,7 +2072,7 @@ find_stateid_by_type(struct nfs4_client *cl, stateid_t 
*t, char typemask)
s = find_stateid_locked(cl, t);
if (s != NULL) {
if (typemask & s->sc_type)
-   atomic_inc(&s->sc_count);
+   refcount_inc(&s->sc_count);
else
s = NULL;
}
@@ -3514,7 +3514,7 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct 
nfsd4_open *open)
continue;
if (local->st_stateowner == &oo->oo_owner) {
ret = local;
-   atomic_inc(&ret->st_stid.sc_count);
+   refcount_inc(&ret->st_stid.sc_count);
break;
}
}
@@ -3573,7 +3573,7 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open 
*open)
goto out_unlock;
 
open->op_stp = NULL;
-   atomic_inc(&stp->st_stid.sc_count);
+   refcount_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_OPEN_STID;
INIT_LIST_HEAD(&stp->st_locks);
stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
@@ -3621,7 +3621,7 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net 
*net)
 * there should be no danger of the refcount going back up again at
 * this poin

[PATCH 1/2] fsnotify: convert fsnotify_group.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable fsnotify_group.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/notify/group.c| 6 +++---
 include/linux/fsnotify_backend.h | 3 ++-
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/fs/notify/group.c b/fs/notify/group.c
index 3235753..b7a4b6a 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -107,7 +107,7 @@ void fsnotify_destroy_group(struct fsnotify_group *group)
  */
 void fsnotify_get_group(struct fsnotify_group *group)
 {
-   atomic_inc(&group->refcnt);
+   refcount_inc(&group->refcnt);
 }
 
 /*
@@ -115,7 +115,7 @@ void fsnotify_get_group(struct fsnotify_group *group)
  */
 void fsnotify_put_group(struct fsnotify_group *group)
 {
-   if (atomic_dec_and_test(&group->refcnt))
+   if (refcount_dec_and_test(&group->refcnt))
fsnotify_final_destroy_group(group);
 }
 
@@ -131,7 +131,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct 
fsnotify_ops *ops)
return ERR_PTR(-ENOMEM);
 
/* set to 0 when there a no external references to this group */
-   atomic_set(&group->refcnt, 1);
+   refcount_set(&group->refcnt, 1);
atomic_set(&group->num_marks, 0);
atomic_set(&group->user_waits, 0);
 
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index c6c6931..20a57ba 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -17,6 +17,7 @@
 #include 
 #include 
 #include 
+#include 
 
 /*
  * IN_* from inotfy.h lines up EXACTLY with FS_*, this is so we can easily
@@ -135,7 +136,7 @@ struct fsnotify_group {
 * inotify_init() and the refcnt will hit 0 only when that fd has been
 * closed.
 */
-   atomic_t refcnt;/* things with interest in this group */
+   refcount_t refcnt;  /* things with interest in this group */
 
const struct fsnotify_ops *ops; /* how this group handles things */
 
-- 
2.7.4



[PATCH 2/2] fsnotify: convert fsnotify_mark.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable fsnotify_mark.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/notify/inotify/inotify_user.c |  4 ++--
 fs/notify/mark.c | 14 +++---
 include/linux/fsnotify_backend.h |  2 +-
 kernel/audit_tree.c  |  2 +-
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 7cc7d3f..d3c20e0 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -376,7 +376,7 @@ static struct inotify_inode_mark 
*inotify_idr_find_locked(struct fsnotify_group
 
fsnotify_get_mark(fsn_mark);
/* One ref for being in the idr, one ref we just took */
-   BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
+   BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
}
 
return i_mark;
@@ -446,7 +446,7 @@ static void inotify_remove_from_idr(struct fsnotify_group 
*group,
 * One ref for being in the idr
 * one ref grabbed by inotify_idr_find
 */
-   if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 2)) {
+   if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d 
i_mark->group=%p\n",
 __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
/* we can't really recover with bad ref cnting.. */
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 9991f88..45f1141 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -105,8 +105,8 @@ static DECLARE_WORK(connector_reaper_work, 
fsnotify_connector_destroy_workfn);
 
 void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
-   WARN_ON_ONCE(!atomic_read(&mark->refcnt));
-   atomic_inc(&mark->refcnt);
+   WARN_ON_ONCE(!refcount_read(&mark->refcnt));
+   refcount_inc(&mark->refcnt);
 }
 
 /*
@@ -116,7 +116,7 @@ void fsnotify_get_mark(struct fsnotify_mark *mark)
  */
 static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
 {
-   return atomic_inc_not_zero(&mark->refcnt);
+   return refcount_inc_not_zero(&mark->refcnt);
 }
 
 static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
@@ -211,7 +211,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
 
/* Catch marks that were actually never attached to object */
if (!mark->connector) {
-   if (atomic_dec_and_test(&mark->refcnt))
+   if (refcount_dec_and_test(&mark->refcnt))
fsnotify_final_mark_destroy(mark);
return;
}
@@ -220,7 +220,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
 * We have to be careful so that traversals of obj_list under lock can
 * safely grab mark reference.
 */
-   if (!atomic_dec_and_lock(&mark->refcnt, &mark->connector->lock))
+   if (!refcount_dec_and_lock(&mark->refcnt, &mark->connector->lock))
return;
 
conn = mark->connector;
@@ -338,7 +338,7 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
 
WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) &&
-atomic_read(&mark->refcnt) < 1 +
+refcount_read(&mark->refcnt) < 1 +
!!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));
 
spin_lock(&mark->lock);
@@ -738,7 +738,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
 {
memset(mark, 0, sizeof(*mark));
spin_lock_init(&mark->lock);
-   atomic_set(&mark->refcnt, 1);
+   refcount_set(&mark->refcnt, 1);
fsnotify_get_group(group);
mark->group = group;
 }
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 20a57ba..26485b1 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -244,7 +244,7 @@ struct fsnotify_mark {
 


[PATCH] fs, cifs: convert tcon_link.tl_count from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable tcon_link.tl_count is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.
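
One non-mechanical detail in this conversion: the two call sites where
the count is known to be zero (the freshly allocated tlink in
cifs_sb_tlink() and the idle one that cifs_prune_tlinks() takes off the
tree) now use refcount_set(&tlink->tl_count, 1) instead of the
inc-based cifs_get_tlink(), because refcount_inc() deliberately WARNs
when asked to increment a zero count. A minimal sketch of the
distinction, with made-up names:

#include <linux/refcount.h>

struct bar {
        refcount_t refs;
};

/* The count is known to be 0 here: establish the first reference. */
static void bar_first_ref(struct bar *b)
{
        refcount_set(&b->refs, 1);      /* refcount_inc() would WARN on a 0 count */
}

/* The caller already holds a reference, so the count is non-zero. */
static void bar_get(struct bar *b)
{
        refcount_inc(&b->refs);
}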

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/cifs/cifsglob.h | 5 +++--
 fs/cifs/connect.c  | 8 
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index de5b2e1..18963bf 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -28,6 +28,7 @@
 #include "cifsacl.h"
 #include 
 #include 
+#include 
 #include 
 #include "smb2pdu.h"
 
@@ -974,7 +975,7 @@ struct tcon_link {
 #define TCON_LINK_PENDING  1
 #define TCON_LINK_IN_TREE  2
unsigned long   tl_time;
-   atomic_ttl_count;
+   refcount_t  tl_count;
struct cifs_tcon*tl_tcon;
 };
 
@@ -992,7 +993,7 @@ static inline struct tcon_link *
 cifs_get_tlink(struct tcon_link *tlink)
 {
if (tlink && !IS_ERR(tlink))
-   atomic_inc(&tlink->tl_count);
+   refcount_inc(&tlink->tl_count);
return tlink;
 }
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 0bfc228..f861152 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2868,7 +2868,7 @@ cifs_put_tlink(struct tcon_link *tlink)
if (!tlink || IS_ERR(tlink))
return;
 
-   if (!atomic_dec_and_test(&tlink->tl_count) ||
+   if (!refcount_dec_and_test(&tlink->tl_count) ||
test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
tlink->tl_time = jiffies;
return;
@@ -4328,7 +4328,7 @@ cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
newtlink->tl_tcon = ERR_PTR(-EACCES);
set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
-   cifs_get_tlink(newtlink);
+   refcount_set(&newtlink->tl_count, 1);
 
spin_lock(&cifs_sb->tlink_tree_lock);
/* was one inserted after previous search? */
@@ -4406,11 +4406,11 @@ cifs_prune_tlinks(struct work_struct *work)
tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
 
if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
-   atomic_read(&tlink->tl_count) != 0 ||
+   refcount_read(&tlink->tl_count) != 0 ||
time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
continue;
 
-   cifs_get_tlink(tlink);
+   refcount_set(&tlink->tl_count, 1);
clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
rb_erase(tmp, root);
 
-- 
2.7.4



[PATCH] fs, nilfs: convert nilfs_root.count from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable nilfs_root.count is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/nilfs2/the_nilfs.c | 8 
 fs/nilfs2/the_nilfs.h | 5 +++--
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 2dd75bf..afebb50 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -737,7 +737,7 @@ struct nilfs_root *nilfs_lookup_root(struct the_nilfs 
*nilfs, __u64 cno)
} else if (cno > root->cno) {
n = n->rb_right;
} else {
-   atomic_inc(&root->count);
+   refcount_inc(&root->count);
spin_unlock(&nilfs->ns_cptree_lock);
return root;
}
@@ -776,7 +776,7 @@ nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 
cno)
} else if (cno > root->cno) {
p = &(*p)->rb_right;
} else {
-   atomic_inc(&root->count);
+   refcount_inc(&root->count);
spin_unlock(&nilfs->ns_cptree_lock);
kfree(new);
return root;
@@ -786,7 +786,7 @@ nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 
cno)
new->cno = cno;
new->ifile = NULL;
new->nilfs = nilfs;
-   atomic_set(&new->count, 1);
+   refcount_set(&new->count, 1);
atomic64_set(&new->inodes_count, 0);
atomic64_set(&new->blocks_count, 0);
 
@@ -806,7 +806,7 @@ nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 
cno)
 
 void nilfs_put_root(struct nilfs_root *root)
 {
-   if (atomic_dec_and_test(&root->count)) {
+   if (refcount_dec_and_test(&root->count)) {
struct the_nilfs *nilfs = root->nilfs;
 
nilfs_sysfs_delete_snapshot_group(root);
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index b305c6f..883d732 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -27,6 +27,7 @@
 #include 
 #include 
 #include 
+#include 
 
 struct nilfs_sc_info;
 struct nilfs_sysfs_dev_subgroups;
@@ -246,7 +247,7 @@ struct nilfs_root {
__u64 cno;
struct rb_node rb_node;
 
-   atomic_t count;
+   refcount_t count;
struct the_nilfs *nilfs;
struct inode *ifile;
 
@@ -299,7 +300,7 @@ void nilfs_swap_super_block(struct the_nilfs *);
 
 static inline void nilfs_get_root(struct nilfs_root *root)
 {
-   atomic_inc(&root->count);
+   refcount_inc(&root->count);
 }
 
 static inline int nilfs_valid_fs(struct the_nilfs *nilfs)
-- 
2.7.4



[PATCH] ncpfs: convert ncp_request_reply.refs from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable ncp_request_reply.refs is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/ncpfs/sock.c | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 98b6db0..523d864 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -29,6 +29,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "ncp_fs.h"
 
@@ -52,7 +53,7 @@ static int _send(struct socket *sock, const void *buff, int 
len)
 struct ncp_request_reply {
struct list_head req;
wait_queue_head_t wq;
-   atomic_t refs;
+   refcount_t refs;
unsigned char* reply_buf;
size_t datalen;
int result;
@@ -72,7 +73,7 @@ static inline struct ncp_request_reply* ncp_alloc_req(void)
return NULL;
 
init_waitqueue_head(&req->wq);
-   atomic_set(&req->refs, (1));
+   refcount_set(&req->refs, (1));
req->status = RQ_IDLE;
 
return req;
@@ -80,12 +81,12 @@ static inline struct ncp_request_reply* ncp_alloc_req(void)
 
 static void ncp_req_get(struct ncp_request_reply *req)
 {
-   atomic_inc(&req->refs);
+   refcount_inc(&req->refs);
 }
 
 static void ncp_req_put(struct ncp_request_reply *req)
 {
-   if (atomic_dec_and_test(&req->refs))
+   if (refcount_dec_and_test(&req->refs))
kfree(req);
 }
 
-- 
2.7.4



[PATCH 5/5] fs, xfs: convert xfs_rui_log_item.rui_refcount from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable xfs_rui_log_item.rui_refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/xfs/xfs_rmap_item.c | 6 +++---
 fs/xfs/xfs_rmap_item.h | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index f3b139c..f914829f 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -204,7 +204,7 @@ xfs_rui_init(
ruip->rui_format.rui_nextents = nextents;
ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
atomic_set(&ruip->rui_next_extent, 0);
-   atomic_set(&ruip->rui_refcount, 2);
+   refcount_set(&ruip->rui_refcount, 2);
 
return ruip;
 }
@@ -243,8 +243,8 @@ void
 xfs_rui_release(
struct xfs_rui_log_item *ruip)
 {
-   ASSERT(atomic_read(&ruip->rui_refcount) > 0);
-   if (atomic_dec_and_test(&ruip->rui_refcount)) {
+   ASSERT(refcount_read(&ruip->rui_refcount) > 0);
+   if (refcount_dec_and_test(&ruip->rui_refcount)) {
xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
xfs_rui_item_free(ruip);
}
diff --git a/fs/xfs/xfs_rmap_item.h b/fs/xfs/xfs_rmap_item.h
index 340c968..1e425a9 100644
--- a/fs/xfs/xfs_rmap_item.h
+++ b/fs/xfs/xfs_rmap_item.h
@@ -64,7 +64,7 @@ struct kmem_zone;
  */
 struct xfs_rui_log_item {
struct xfs_log_item rui_item;
-   atomic_trui_refcount;
+   refcount_t  rui_refcount;
atomic_trui_next_extent;
unsigned long   rui_flags;  /* misc flags */
struct xfs_rui_log_format   rui_format;
-- 
2.7.4



[PATCH 3/5] fs, xfs: convert xlog_ticket.t_ref from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable xlog_ticket.t_ref is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/xfs/xfs_log.c  | 10 +-
 fs/xfs/xfs_log_priv.h |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index dc95a49..e4578c0 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -3571,8 +3571,8 @@ void
 xfs_log_ticket_put(
xlog_ticket_t   *ticket)
 {
-   ASSERT(atomic_read(&ticket->t_ref) > 0);
-   if (atomic_dec_and_test(&ticket->t_ref))
+   ASSERT(refcount_read(&ticket->t_ref) > 0);
+   if (refcount_dec_and_test(&ticket->t_ref))
kmem_zone_free(xfs_log_ticket_zone, ticket);
 }
 
@@ -3580,8 +3580,8 @@ xlog_ticket_t *
 xfs_log_ticket_get(
xlog_ticket_t   *ticket)
 {
-   ASSERT(atomic_read(&ticket->t_ref) > 0);
-   atomic_inc(&ticket->t_ref);
+   ASSERT(refcount_read(&ticket->t_ref) > 0);
+   refcount_inc(&ticket->t_ref);
return ticket;
 }
 
@@ -3703,7 +3703,7 @@ xlog_ticket_alloc(
 
unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes);
 
-   atomic_set(&tic->t_ref, 1);
+   refcount_set(&tic->t_ref, 1);
tic->t_task = current;
INIT_LIST_HEAD(&tic->t_queue);
tic->t_unit_res = unit_res;
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 51bf7b8..29f6e1f 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -168,7 +168,7 @@ typedef struct xlog_ticket {
struct list_head   t_queue;  /* reserve/write queue */
struct task_struct *t_task;  /* task that owns this ticket */
xlog_tid_t t_tid;/* transaction identifier   : 4  */
-   atomic_t   t_ref;/* ticket reference count   : 4  */
+   refcount_t t_ref;/* ticket reference count   : 4  */
intt_curr_res;   /* current reservation in bytes : 4  */
intt_unit_res;   /* unit reservation in bytes: 4  */
char   t_ocnt;   /* original count   : 1  */
-- 
2.7.4



[PATCH 2/5] fs, xfs: convert xfs_efi_log_item.efi_refcount from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable xfs_efi_log_item.efi_refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/xfs/xfs_extfree_item.c | 6 +++---
 fs/xfs/xfs_extfree_item.h | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 44f8c54..dfe2bcb 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -220,7 +220,7 @@ xfs_efi_init(
efip->efi_format.efi_nextents = nextents;
efip->efi_format.efi_id = (uintptr_t)(void *)efip;
atomic_set(&efip->efi_next_extent, 0);
-   atomic_set(&efip->efi_refcount, 2);
+   refcount_set(&efip->efi_refcount, 2);
 
return efip;
 }
@@ -290,8 +290,8 @@ void
 xfs_efi_release(
struct xfs_efi_log_item *efip)
 {
-   ASSERT(atomic_read(&efip->efi_refcount) > 0);
-   if (atomic_dec_and_test(&efip->efi_refcount)) {
+   ASSERT(refcount_read(&efip->efi_refcount) > 0);
+   if (refcount_dec_and_test(&efip->efi_refcount)) {
xfs_trans_ail_remove(&efip->efi_item, SHUTDOWN_LOG_IO_ERROR);
xfs_efi_item_free(efip);
}
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index a32c794..fadf736 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -64,7 +64,7 @@ struct kmem_zone;
  */
 typedef struct xfs_efi_log_item {
xfs_log_item_t  efi_item;
-   atomic_tefi_refcount;
+   refcount_t  efi_refcount;
atomic_tefi_next_extent;
unsigned long   efi_flags;  /* misc flags */
xfs_efi_log_format_tefi_format;
-- 
2.7.4



[PATCH 4/5] fs, xfs: convert xfs_cui_log_item.cui_refcount from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable xfs_cui_log_item.cui_refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/xfs/xfs_refcount_item.c | 6 +++---
 fs/xfs/xfs_refcount_item.h | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index 8f2e2fa..004d002 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -205,7 +205,7 @@ xfs_cui_init(
cuip->cui_format.cui_nextents = nextents;
cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
atomic_set(&cuip->cui_next_extent, 0);
-   atomic_set(&cuip->cui_refcount, 2);
+   refcount_set(&cuip->cui_refcount, 2);
 
return cuip;
 }
@@ -221,8 +221,8 @@ void
 xfs_cui_release(
struct xfs_cui_log_item *cuip)
 {
-   ASSERT(atomic_read(&cuip->cui_refcount) > 0);
-   if (atomic_dec_and_test(&cuip->cui_refcount)) {
+   ASSERT(refcount_read(&cuip->cui_refcount) > 0);
+   if (refcount_dec_and_test(&cuip->cui_refcount)) {
xfs_trans_ail_remove(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
xfs_cui_item_free(cuip);
}
diff --git a/fs/xfs/xfs_refcount_item.h b/fs/xfs/xfs_refcount_item.h
index 5b74ddd..abc0377 100644
--- a/fs/xfs/xfs_refcount_item.h
+++ b/fs/xfs/xfs_refcount_item.h
@@ -63,7 +63,7 @@ struct kmem_zone;
  */
 struct xfs_cui_log_item {
struct xfs_log_item cui_item;
-   atomic_tcui_refcount;
+   refcount_t  cui_refcount;
atomic_tcui_next_extent;
unsigned long   cui_flags;  /* misc flags */
struct xfs_cui_log_format   cui_format;
-- 
2.7.4



[PATCH 0/5] xfs refcount conversions

2017-10-20 Thread Elena Reshetova
Note: our previous thread didn't reach any conclusion, so
I am resending this now (rebased on the latest linux-next) to revive
the discussion. refcount_t is slowly becoming the standard for
refcounters and we would really like to complete all the conversions
where it is applicable.

**

This series, for xfs, replaces atomic_t reference
counters with the new refcount_t type and API (see include/linux/refcount.h).
By doing this we prevent intentional or accidental
underflows or overflows that can lead to use-after-free vulnerabilities.
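
The protection itself comes from checked, saturating counter updates. A
simplified sketch of the idea, paraphrased rather than the exact
lib/refcount.c code (my_refcount_inc_not_zero is a made-up name):

static inline bool my_refcount_inc_not_zero(refcount_t *r)
{
	unsigned int old, new;

	do {
		old = refcount_read(r);
		if (!old)
			return false;	/* object already dying: refuse to resurrect it */
		new = (old == UINT_MAX) ? UINT_MAX : old + 1;	/* saturate, never wrap */
	} while (atomic_cmpxchg(&r->refs, old, new) != (int)old);

	WARN_ONCE(new == UINT_MAX, "refcount saturated, object will be leaked\n");
	return true;
}

A wrapped atomic_t, by contrast, silently goes from UINT_MAX back to 0,
after which a put frees the object while other users still hold pointers
to it.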

The patches are fully independent and can be cherry-picked separately.
If there are no objections to the patches, please merge them via respective 
trees.

Elena Reshetova (5):
  fs, xfs: convert xfs_bui_log_item.bui_refcount from atomic_t to
refcount_t
  fs, xfs: convert xfs_efi_log_item.efi_refcount from atomic_t to
refcount_t
  fs, xfs: convert xlog_ticket.t_ref from atomic_t to refcount_t
  fs, xfs: convert xfs_cui_log_item.cui_refcount from atomic_t to
refcount_t
  fs, xfs: convert xfs_rui_log_item.rui_refcount from atomic_t to
refcount_t

 fs/xfs/xfs_bmap_item.c |  6 +++---
 fs/xfs/xfs_bmap_item.h |  2 +-
 fs/xfs/xfs_extfree_item.c  |  6 +++---
 fs/xfs/xfs_extfree_item.h  |  2 +-
 fs/xfs/xfs_linux.h |  1 +
 fs/xfs/xfs_log.c   | 10 +-
 fs/xfs/xfs_log_priv.h  |  2 +-
 fs/xfs/xfs_refcount_item.c |  6 +++---
 fs/xfs/xfs_refcount_item.h |  2 +-
 fs/xfs/xfs_rmap_item.c |  6 +++---
 fs/xfs/xfs_rmap_item.h |  2 +-
 11 files changed, 23 insertions(+), 22 deletions(-)

-- 
2.7.4



[PATCH 1/5] fs, xfs: convert xfs_bui_log_item.bui_refcount from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable xfs_bui_log_item.bui_refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/xfs/xfs_bmap_item.c | 6 +++---
 fs/xfs/xfs_bmap_item.h | 2 +-
 fs/xfs/xfs_linux.h | 1 +
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index dd136f7..0f2e3d8 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -201,7 +201,7 @@ xfs_bui_init(
buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS;
buip->bui_format.bui_id = (uintptr_t)(void *)buip;
atomic_set(&buip->bui_next_extent, 0);
-   atomic_set(&buip->bui_refcount, 2);
+   refcount_set(&buip->bui_refcount, 2);
 
return buip;
 }
@@ -217,8 +217,8 @@ void
 xfs_bui_release(
struct xfs_bui_log_item *buip)
 {
-   ASSERT(atomic_read(&buip->bui_refcount) > 0);
-   if (atomic_dec_and_test(&buip->bui_refcount)) {
+   ASSERT(refcount_read(&buip->bui_refcount) > 0);
+   if (refcount_dec_and_test(&buip->bui_refcount)) {
xfs_trans_ail_remove(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR);
xfs_bui_item_free(buip);
}
diff --git a/fs/xfs/xfs_bmap_item.h b/fs/xfs/xfs_bmap_item.h
index c867daa..7048b14 100644
--- a/fs/xfs/xfs_bmap_item.h
+++ b/fs/xfs/xfs_bmap_item.h
@@ -61,7 +61,7 @@ struct kmem_zone;
  */
 struct xfs_bui_log_item {
struct xfs_log_item bui_item;
-   atomic_tbui_refcount;
+   refcount_t  bui_refcount;
atomic_tbui_next_extent;
unsigned long   bui_flags;  /* misc flags */
struct xfs_bui_log_format   bui_format;
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index dcd1292..0def85f 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -20,6 +20,7 @@
 
 #include 
 #include 
+#include 
 
 /*
  * Kernel specific type declarations for XFS
-- 
2.7.4



[PATCH 05/15] sched/task_struct: convert task_struct.usage to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable task_struct.usage is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.
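
As context for the get_task_struct()/put_task_struct() hunks below, the
usual reason for taking such a reference is to keep the task_struct alive
after leaving the RCU section (or lock) that produced the pointer. An
illustrative sketch (do_something_sleepy() is a made-up placeholder):

rcu_read_lock();
p = find_task_by_vpid(pid);		/* pointer only guaranteed inside the section */
if (p)
	get_task_struct(p);		/* after this patch: refcount_inc(&p->usage) */
rcu_read_unlock();

if (p) {
	do_something_sleepy(p);		/* work that may sleep or take a long time */
	put_task_struct(p);		/* refcount_dec_and_test() frees on the last put */
}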

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/init_task.h  | 2 +-
 include/linux/sched.h  | 3 ++-
 include/linux/sched/task.h | 4 ++--
 kernel/fork.c  | 4 ++--
 4 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index a85376e..64d86ec 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -226,7 +226,7 @@ extern struct cred init_cred;
INIT_TASK_TI(tsk)   \
.state  = 0,\
.stack  = init_stack,   \
-   .usage  = ATOMIC_INIT(2),   \
+   .usage  = REFCOUNT_INIT(2), \
.flags  = PF_KTHREAD,   \
.prio   = MAX_PRIO-20,  \
.static_prio= MAX_PRIO-20,  \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0f897df..47f1101 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -20,6 +20,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -536,7 +537,7 @@ struct task_struct {
randomized_struct_fields_start
 
void*stack;
-   atomic_tusage;
+   refcount_t  usage;
/* Per task flags (PF_*), defined further below: */
unsigned intflags;
unsigned intptrace;
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index a5e6f09..ac4317d 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -85,13 +85,13 @@ extern void sched_exec(void);
 #define sched_exec()   {}
 #endif
 
-#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+#define get_task_struct(tsk) do { refcount_inc(&(tsk)->usage); } while(0)
 
 extern void __put_task_struct(struct task_struct *t);
 
 static inline void put_task_struct(struct task_struct *t)
 {
-   if (atomic_dec_and_test(&t->usage))
+   if (refcount_dec_and_test(&t->usage))
__put_task_struct(t);
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 869850b..68cc7a0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -649,7 +649,7 @@ static inline void put_signal_struct(struct signal_struct 
*sig)
 void __put_task_struct(struct task_struct *tsk)
 {
WARN_ON(!tsk->exit_state);
-   WARN_ON(atomic_read(&tsk->usage));
+   WARN_ON(refcount_read(&tsk->usage));
WARN_ON(tsk == current);
 
cgroup_free(tsk);
@@ -824,7 +824,7 @@ static struct task_struct *dup_task_struct(struct 
task_struct *orig, int node)
 * One for us, one for whoever does the "release_task()" (usually
 * parent)
 */
-   atomic_set(&tsk->usage, 2);
+   refcount_set(&tsk->usage, 2);
 #ifdef CONFIG_BLK_DEV_IO_TRACE
tsk->btrace_seq = 0;
 #endif
-- 
2.7.4



[PATCH 00/15] v5 kernel core pieces refcount conversions

2017-10-20 Thread Elena Reshetova
Note: this is just a fresh rebase on top of linux-next.
No functional changes.
 

Changes in v5:
 * Kees caught that the following changes in
   perf_event_context.refcount and futex_pi_state.refcount
   are not correct when ARCH_HAS_REFCOUNT is enabled:
-   WARN_ON(!atomic_inc_not_zero(refcount));
+   refcount_inc(refcount);
   So they are now changed back to using refcount_inc_not_zero
   (see the sketch below).
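
The two calls make different promises, which is why the conversion cannot
be simplified here. A rough illustration (obj and its ref field are
hypothetical, not code from these patches):

/*
 * refcount_inc(): the caller already holds a reference, so the counter can
 * never legitimately be 0; seeing 0 is a bug and only produces a WARN.
 */
refcount_inc(&obj->ref);			/* e.g. handing a second reference to a child */

/*
 * refcount_inc_not_zero(): the counter may legitimately be 0, because the
 * object was found via something that outlives its last reference (RCU, a
 * lookup structure, ...); the return value must therefore be checked.
 */
if (!refcount_inc_not_zero(&obj->ref))
	return NULL;				/* raced with the final put */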

Changes in v4:
 * just rebase and corrections on linux-next/master

Changes in v3:
 * SoB chain corrected
 * minor corrections based on v2 feedback
 * rebase on linux-next/master as of today

Changes in v2:
 * dropped already merged patches
 * rebase on top of linux-next/master
 * Now, by default, refcount_t = atomic_t and uses the standard atomic
   operations unless CONFIG_REFCOUNT_FULL is enabled (see the schematic
   sketch below).
   This is a compromise for subsystems that are performance-critical
   (such as net) and cannot accept even a slight delay on the
   refcounter operations.
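
Schematically, the CONFIG_REFCOUNT_FULL split looks like this (a paraphrased
sketch, not the literal include/linux/refcount.h contents):

#ifdef CONFIG_REFCOUNT_FULL
extern void refcount_inc(refcount_t *r);		/* checked, saturating version */
extern bool refcount_dec_and_test(refcount_t *r);
#else
static inline void refcount_inc(refcount_t *r)
{
	atomic_inc(&r->refs);				/* plain atomic, no extra cost */
}

static inline bool refcount_dec_and_test(refcount_t *r)
{
	return atomic_dec_and_test(&r->refs);
}
#endif

Either way the call sites stay identical, which is what makes the
conversions in this series safe to merge independently of the config choice.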

This series, for core kernel components, replaces atomic_t reference
counters with the new refcount_t type and API (see include/linux/refcount.h).
By doing this we prevent intentional or accidental
underflows or overflows that can lead to use-after-free vulnerabilities.

The patches are fully independent and can be cherry-picked separately.
If there are no objections to the patches, please merge them via respective 
trees.


Elena Reshetova (15):
  sched: convert sighand_struct.count to refcount_t
  sched: convert signal_struct.sigcnt to refcount_t
  sched: convert user_struct.__count to refcount_t
  sched: convert numa_group.refcount to refcount_t
  sched/task_struct: convert task_struct.usage to refcount_t
  sched/task_struct: convert task_struct.stack_refcount to refcount_t
  perf: convert perf_event_context.refcount to refcount_t
  perf/ring_buffer: convert ring_buffer.refcount to refcount_t
  perf/ring_buffer: convert ring_buffer.aux_refcount to refcount_t
  uprobes: convert uprobe.ref to refcount_t
  nsproxy: convert nsproxy.count to refcount_t
  groups: convert group_info.usage to refcount_t
  creds: convert cred.usage to refcount_t
  kcov: convert kcov.refcount to refcount_t
  bdi: convert bdi_writeback_congested.refcnt from atomic_t to
refcount_t

 fs/exec.c|  4 ++--
 fs/proc/task_nommu.c |  2 +-
 include/linux/backing-dev-defs.h |  3 ++-
 include/linux/backing-dev.h  |  4 ++--
 include/linux/cred.h | 13 ++--
 include/linux/init_task.h|  7 +++---
 include/linux/nsproxy.h  |  6 +++---
 include/linux/perf_event.h   |  3 ++-
 include/linux/sched.h|  5 +++--
 include/linux/sched/signal.h |  5 +++--
 include/linux/sched/task.h   |  4 ++--
 include/linux/sched/task_stack.h |  2 +-
 include/linux/sched/user.h   |  5 +++--
 kernel/cred.c| 46 
 kernel/events/core.c | 18 
 kernel/events/internal.h |  5 +++--
 kernel/events/ring_buffer.c  |  8 +++
 kernel/events/uprobes.c  |  8 +++
 kernel/fork.c| 24 ++---
 kernel/groups.c  |  2 +-
 kernel/kcov.c|  9 
 kernel/nsproxy.c |  6 +++---
 kernel/sched/fair.c  | 12 +--
 kernel/user.c|  8 +++
 mm/backing-dev.c | 14 ++--
 25 files changed, 117 insertions(+), 106 deletions(-)

-- 
2.7.4



[PATCH 02/15] sched: convert signal_struct.sigcnt to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable signal_struct.sigcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/sched/signal.h | 2 +-
 kernel/fork.c| 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index b40fbf7..856d957 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -77,7 +77,7 @@ struct thread_group_cputimer {
  * the locking of signal_struct.
  */
 struct signal_struct {
-   atomic_tsigcnt;
+   refcount_t  sigcnt;
atomic_tlive;
int nr_threads;
struct list_headthread_head;
diff --git a/kernel/fork.c b/kernel/fork.c
index ab4ddc9..869850b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -642,7 +642,7 @@ static inline void free_signal_struct(struct signal_struct 
*sig)
 
 static inline void put_signal_struct(struct signal_struct *sig)
 {
-   if (atomic_dec_and_test(&sig->sigcnt))
+   if (refcount_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
 }
 
@@ -1443,7 +1443,7 @@ static int copy_signal(unsigned long clone_flags, struct 
task_struct *tsk)
 
sig->nr_threads = 1;
atomic_set(&sig->live, 1);
-   atomic_set(&sig->sigcnt, 1);
+   refcount_set(&sig->sigcnt, 1);
 
/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
@@ -1950,7 +1950,7 @@ static __latent_entropy struct task_struct *copy_process(
} else {
current->signal->nr_threads++;
atomic_inc(¤t->signal->live);
-   atomic_inc(¤t->signal->sigcnt);
+   refcount_inc(¤t->signal->sigcnt);
list_add_tail_rcu(&p->thread_group,
  &p->group_leader->thread_group);
list_add_tail_rcu(&p->thread_node,
-- 
2.7.4



[PATCH 06/15] sched/task_struct: convert task_struct.stack_refcount to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable task_struct.stack_refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/init_task.h| 3 ++-
 include/linux/sched.h| 2 +-
 include/linux/sched/task_stack.h | 2 +-
 kernel/fork.c| 6 +++---
 4 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 64d86ec..d3bc6ac 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -12,6 +12,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -206,7 +207,7 @@ extern struct cred init_cred;
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 # define INIT_TASK_TI(tsk) \
.thread_info = INIT_THREAD_INFO(tsk),   \
-   .stack_refcount = ATOMIC_INIT(1),
+   .stack_refcount = REFCOUNT_INIT(1),
 #else
 # define INIT_TASK_TI(tsk)
 #endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 47f1101..4eb19f9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1091,7 +1091,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_THREAD_INFO_IN_TASK
/* A live task holds one reference: */
-   atomic_tstack_refcount;
+   refcount_t  stack_refcount;
 #endif
 #ifdef CONFIG_LIVEPATCH
int patch_state;
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index df6ea66..aab3809 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -60,7 +60,7 @@ static inline unsigned long *end_of_stack(struct task_struct 
*p)
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 static inline void *try_get_task_stack(struct task_struct *tsk)
 {
-   return atomic_inc_not_zero(&tsk->stack_refcount) ?
+   return refcount_inc_not_zero(&tsk->stack_refcount) ?
task_stack_page(tsk) : NULL;
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 68cc7a0..b7b26d5a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -362,7 +362,7 @@ static void release_task_stack(struct task_struct *tsk)
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 void put_task_stack(struct task_struct *tsk)
 {
-   if (atomic_dec_and_test(&tsk->stack_refcount))
+   if (refcount_dec_and_test(&tsk->stack_refcount))
release_task_stack(tsk);
 }
 #endif
@@ -380,7 +380,7 @@ void free_task(struct task_struct *tsk)
 * If the task had a separate stack allocation, it should be gone
 * by now.
 */
-   WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
+   WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
 #endif
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
@@ -795,7 +795,7 @@ static struct task_struct *dup_task_struct(struct 
task_struct *orig, int node)
tsk->stack_vm_area = stack_vm_area;
 #endif
 #ifdef CONFIG_THREAD_INFO_IN_TASK
-   atomic_set(&tsk->stack_refcount, 1);
+   refcount_set(&tsk->stack_refcount, 1);
 #endif
 
if (err)
-- 
2.7.4



[PATCH 03/15] sched: convert user_struct.__count to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable user_struct.__count is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.
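
One hunk below converts atomic_dec_and_lock(); the point of that helper is
that the lock is only taken when the counter actually drops to zero, so the
common put path stays lock-free. A generic sketch (obj, obj_list_lock and
the list field are illustrative, not from this patch):

if (refcount_dec_and_lock(&obj->ref, &obj_list_lock)) {
	/* counter hit 0 and obj_list_lock is now held */
	list_del(&obj->node);		/* unpublish while holding the lock */
	spin_unlock(&obj_list_lock);
	kfree(obj);
}
/* otherwise: not the last reference, the lock was never taken */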

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/sched/user.h | 5 +++--
 kernel/user.c  | 8 
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 3c07e41..afcbf19 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -3,6 +3,7 @@
 
 #include 
 #include 
+#include 
 
 struct key;
 
@@ -10,7 +11,7 @@ struct key;
  * Some day this will be a full-fledged user tracking system..
  */
 struct user_struct {
-   atomic_t __count;   /* reference count */
+   refcount_t __count; /* reference count */
atomic_t processes; /* How many processes does this user have? */
atomic_t sigpending;/* How many pending signals does this user 
have? */
 #ifdef CONFIG_FANOTIFY
@@ -54,7 +55,7 @@ extern struct user_struct root_user;
 extern struct user_struct * alloc_uid(kuid_t);
 static inline struct user_struct *get_uid(struct user_struct *u)
 {
-   atomic_inc(&u->__count);
+   refcount_inc(&u->__count);
return u;
 }
 extern void free_uid(struct user_struct *);
diff --git a/kernel/user.c b/kernel/user.c
index 00281ad..c072348 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -90,7 +90,7 @@ static DEFINE_SPINLOCK(uidhash_lock);
 
 /* root_user.__count is 1, for init task cred */
 struct user_struct root_user = {
-   .__count= ATOMIC_INIT(1),
+   .__count= REFCOUNT_INIT(1),
.processes  = ATOMIC_INIT(1),
.sigpending = ATOMIC_INIT(0),
.locked_shm = 0,
@@ -116,7 +116,7 @@ static struct user_struct *uid_hash_find(kuid_t uid, struct 
hlist_head *hashent)
 
hlist_for_each_entry(user, hashent, uidhash_node) {
if (uid_eq(user->uid, uid)) {
-   atomic_inc(&user->__count);
+   refcount_inc(&user->__count);
return user;
}
}
@@ -163,7 +163,7 @@ void free_uid(struct user_struct *up)
return;
 
local_irq_save(flags);
-   if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+   if (refcount_dec_and_lock(&up->__count, &uidhash_lock))
free_user(up, flags);
else
local_irq_restore(flags);
@@ -184,7 +184,7 @@ struct user_struct *alloc_uid(kuid_t uid)
goto out_unlock;
 
new->uid = uid;
-   atomic_set(&new->__count, 1);
+   refcount_set(&new->__count, 1);
 
/*
 * Before adding this, check whether we raced
-- 
2.7.4



[PATCH 01/15] sched: convert sighand_struct.count to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable sighand_struct.count is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/exec.c| 4 ++--
 fs/proc/task_nommu.c | 2 +-
 include/linux/init_task.h| 2 +-
 include/linux/sched/signal.h | 3 ++-
 kernel/fork.c| 8 
 5 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index 704e195..20dbd98 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1181,7 +1181,7 @@ static int de_thread(struct task_struct *tsk)
flush_itimer_signals();
 #endif
 
-   if (atomic_read(&oldsighand->count) != 1) {
+   if (refcount_read(&oldsighand->count) != 1) {
struct sighand_struct *newsighand;
/*
 * This ->sighand is shared with the CLONE_SIGHAND
@@ -1191,7 +1191,7 @@ static int de_thread(struct task_struct *tsk)
if (!newsighand)
return -ENOMEM;
 
-   atomic_set(&newsighand->count, 1);
+   refcount_set(&newsighand->count, 1);
memcpy(newsighand->action, oldsighand->action,
   sizeof(newsighand->action));
 
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index bdb0d0d..ba680e5 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -63,7 +63,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
else
bytes += kobjsize(current->files);
 
-   if (current->sighand && atomic_read(¤t->sighand->count) > 1)
+   if (current->sighand && refcount_read(¤t->sighand->count) > 1)
sbytes += kobjsize(current->sighand);
else
bytes += kobjsize(current->sighand);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index cc45798..a85376e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -85,7 +85,7 @@ extern struct fs_struct init_fs;
 extern struct nsproxy init_nsproxy;
 
 #define INIT_SIGHAND(sighand) {
\
-   .count  = ATOMIC_INIT(1),   \
+   .count  = REFCOUNT_INIT(1), \
.action = { { { .sa_handler = SIG_DFL, } }, },  \
.siglock= __SPIN_LOCK_UNLOCKED(sighand.siglock),\
.signalfd_wqh   = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh),  
\
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index c5c137e..b40fbf7 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -7,13 +7,14 @@
 #include 
 #include 
 #include 
+#include 
 
 /*
  * Types defining task->signal and task->sighand and APIs using them:
  */
 
 struct sighand_struct {
-   atomic_tcount;
+   refcount_t  count;
struct k_sigaction  action[_NSIG];
spinlock_t  siglock;
wait_queue_head_t   signalfd_wqh;
diff --git a/kernel/fork.c b/kernel/fork.c
index 7fe10e5..ab4ddc9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1381,7 +1381,7 @@ static int copy_sighand(unsigned long clone_flags, struct 
task_struct *tsk)
struct sighand_struct *sig;
 
if (clone_flags & CLONE_SIGHAND) {
-   atomic_inc(¤t->sighand->count);
+   refcount_inc(¤t->sighand->count);
return 0;
}
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
@@ -1389,14 +1389,14 @@ static int copy_sighand(unsigned long clone_flags, 
struct task_struct *tsk)
if (!sig)
return -ENOMEM;
 
-   atomic_set(&sig->count, 1);
+   refcount_set(&sig->count, 1);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
return 0;
 }
 
 void __cleanup_sighand(struct sighand_struct *sighand)
 {
-   if (atomic_dec_and_test(&sighand->count)) {
+   if (refcount_dec_and_test(&sighand->count)) {
signalfd_cleanup(sighand);
/*
 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we

[PATCH 10/15] uprobes: convert uprobe.ref to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable uprobe.ref is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.
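
The refcount_set(&uprobe->ref, 2) hunk below reflects a common idiom rather
than an off-by-one: a freshly inserted object immediately holds two
references, one owned by the rb-tree it was just linked into and one handed
back to the caller. In generic terms (illustrative):

refcount_set(&obj->ref, 2);	/* ref #1: the tree now points at obj       */
				/* ref #2: returned to the caller of insert */

/* later, the tree removal and the caller each drop their own reference: */
if (refcount_dec_and_test(&obj->ref))
	kfree(obj);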

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 kernel/events/uprobes.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 8d42d8f..3514b42 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -66,7 +66,7 @@ static struct percpu_rw_semaphore dup_mmap_sem;
 
 struct uprobe {
struct rb_node  rb_node;/* node in the rb tree */
-   atomic_tref;
+   refcount_t  ref;
struct rw_semaphore register_rwsem;
struct rw_semaphore consumer_rwsem;
struct list_headpending_list;
@@ -371,13 +371,13 @@ set_orig_insn(struct arch_uprobe *auprobe, struct 
mm_struct *mm, unsigned long v
 
 static struct uprobe *get_uprobe(struct uprobe *uprobe)
 {
-   atomic_inc(&uprobe->ref);
+   refcount_inc(&uprobe->ref);
return uprobe;
 }
 
 static void put_uprobe(struct uprobe *uprobe)
 {
-   if (atomic_dec_and_test(&uprobe->ref))
+   if (refcount_dec_and_test(&uprobe->ref))
kfree(uprobe);
 }
 
@@ -459,7 +459,7 @@ static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
rb_link_node(&uprobe->rb_node, parent, p);
rb_insert_color(&uprobe->rb_node, &uprobes_tree);
/* get access + creation ref */
-   atomic_set(&uprobe->ref, 2);
+   refcount_set(&uprobe->ref, 2);
 
return u;
 }
-- 
2.7.4



[PATCH 09/15] perf/ring_buffer: convert ring_buffer.aux_refcount to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable ring_buffer.aux_refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 kernel/events/core.c| 2 +-
 kernel/events/internal.h| 2 +-
 kernel/events/ring_buffer.c | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 66d7e18..3848480 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5182,7 +5182,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 
/* this has to be the last one */
rb_free_aux(rb);
-   WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
+   WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
 
mutex_unlock(&event->mmap_mutex);
}
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 1cdd9fa..cc5b545 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -48,7 +48,7 @@ struct ring_buffer {
atomic_taux_mmap_count;
unsigned long   aux_mmap_locked;
void(*free_aux)(void *);
-   atomic_taux_refcount;
+   refcount_t  aux_refcount;
void**aux_pages;
void*aux_priv;
 
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 86e1379..08838cd6 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -357,7 +357,7 @@ void *perf_aux_output_begin(struct perf_output_handle 
*handle,
if (!atomic_read(&rb->aux_mmap_count))
goto err;
 
-   if (!atomic_inc_not_zero(&rb->aux_refcount))
+   if (!refcount_inc_not_zero(&rb->aux_refcount))
goto err;
 
/*
@@ -655,7 +655,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event 
*event,
 * we keep a refcount here to make sure either of the two can
 * reference them safely.
 */
-   atomic_set(&rb->aux_refcount, 1);
+   refcount_set(&rb->aux_refcount, 1);
 
rb->aux_overwrite = overwrite;
rb->aux_watermark = watermark;
@@ -674,7 +674,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event 
*event,
 
 void rb_free_aux(struct ring_buffer *rb)
 {
-   if (atomic_dec_and_test(&rb->aux_refcount))
+   if (refcount_dec_and_test(&rb->aux_refcount))
__rb_free_aux(rb);
 }
 
-- 
2.7.4



[PATCH 08/15] perf/ring_buffer: convert ring_buffer.refcount to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable ring_buffer.refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 kernel/events/core.c| 4 ++--
 kernel/events/internal.h| 3 ++-
 kernel/events/ring_buffer.c | 2 +-
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7272b47..66d7e18 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5107,7 +5107,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event 
*event)
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
-   if (!atomic_inc_not_zero(&rb->refcount))
+   if (!refcount_inc_not_zero(&rb->refcount))
rb = NULL;
}
rcu_read_unlock();
@@ -5117,7 +5117,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event 
*event)
 
 void ring_buffer_put(struct ring_buffer *rb)
 {
-   if (!atomic_dec_and_test(&rb->refcount))
+   if (!refcount_dec_and_test(&rb->refcount))
return;
 
WARN_ON_ONCE(!list_empty(&rb->event_list));
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 843e970..1cdd9fa 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -3,13 +3,14 @@
 
 #include 
 #include 
+#include 
 
 /* Buffer handling */
 
 #define RING_BUFFER_WRITABLE   0x01
 
 struct ring_buffer {
-   atomic_trefcount;
+   refcount_t  refcount;
struct rcu_head rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
struct work_struct  work;
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index f684d8e..86e1379 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -284,7 +284,7 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, 
int flags)
else
rb->overwrite = 1;
 
-   atomic_set(&rb->refcount, 1);
+   refcount_set(&rb->refcount, 1);
 
INIT_LIST_HEAD(&rb->event_list);
spin_lock_init(&rb->event_lock);
-- 
2.7.4



[PATCH 07/15] perf: convert perf_event_context.refcount to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable perf_event_context.refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.
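
Several hunks below share the same lookup idiom: read the context pointer
under RCU, try to pin it with refcount_inc_not_zero(), and retry if the
object was concurrently released. A stripped-down version of that pattern:

again:
	rcu_read_lock();
	ctx = READ_ONCE(event->ctx);
	if (!refcount_inc_not_zero(&ctx->refcount)) {
		rcu_read_unlock();
		goto again;		/* raced with the final put; reload the pointer */
	}
	rcu_read_unlock();
	/* ctx is now pinned and may be used outside the RCU section */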

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/perf_event.h |  3 ++-
 kernel/events/core.c   | 12 ++--
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 79b18a2..ab7f452 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -54,6 +54,7 @@ struct perf_guest_info_callbacks {
 #include 
 #include 
 #include 
+#include 
 #include 
 
 struct perf_callchain_entry {
@@ -735,7 +736,7 @@ struct perf_event_context {
int nr_stat;
int nr_freq;
int rotate_disable;
-   atomic_trefcount;
+   refcount_t  refcount;
struct task_struct  *task;
 
/*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 74671e1..7272b47 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1109,7 +1109,7 @@ static void perf_event_ctx_deactivate(struct 
perf_event_context *ctx)
 
 static void get_ctx(struct perf_event_context *ctx)
 {
-   WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
+   WARN_ON(!refcount_inc_not_zero(&ctx->refcount));
 }
 
 static void free_ctx(struct rcu_head *head)
@@ -1123,7 +1123,7 @@ static void free_ctx(struct rcu_head *head)
 
 static void put_ctx(struct perf_event_context *ctx)
 {
-   if (atomic_dec_and_test(&ctx->refcount)) {
+   if (refcount_dec_and_test(&ctx->refcount)) {
if (ctx->parent_ctx)
put_ctx(ctx->parent_ctx);
if (ctx->task && ctx->task != TASK_TOMBSTONE)
@@ -1201,7 +1201,7 @@ perf_event_ctx_lock_nested(struct perf_event *event, int 
nesting)
 again:
rcu_read_lock();
ctx = ACCESS_ONCE(event->ctx);
-   if (!atomic_inc_not_zero(&ctx->refcount)) {
+   if (!refcount_inc_not_zero(&ctx->refcount)) {
rcu_read_unlock();
goto again;
}
@@ -1334,7 +1334,7 @@ perf_lock_task_context(struct task_struct *task, int 
ctxn, unsigned long *flags)
}
 
if (ctx->task == TASK_TOMBSTONE ||
-   !atomic_inc_not_zero(&ctx->refcount)) {
+   !refcount_inc_not_zero(&ctx->refcount)) {
raw_spin_unlock(&ctx->lock);
ctx = NULL;
} else {
@@ -3813,7 +3813,7 @@ static void __perf_event_init_context(struct 
perf_event_context *ctx)
INIT_LIST_HEAD(&ctx->pinned_groups);
INIT_LIST_HEAD(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
-   atomic_set(&ctx->refcount, 1);
+   refcount_set(&ctx->refcount, 1);
 }
 
 static struct perf_event_context *
@@ -9895,7 +9895,7 @@ __perf_event_ctx_lock_double(struct perf_event 
*group_leader,
 again:
rcu_read_lock();
gctx = READ_ONCE(group_leader->ctx);
-   if (!atomic_inc_not_zero(&gctx->refcount)) {
+   if (!refcount_inc_not_zero(&gctx->refcount)) {
rcu_read_unlock();
goto again;
}
-- 
2.7.4



[PATCH 13/15] creds: convert cred.usage to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable cred.usage is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/cred.h |  6 +++---
 kernel/cred.c| 44 ++--
 2 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/include/linux/cred.h b/include/linux/cred.h
index 00948dd..a9f217b 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -109,7 +109,7 @@ extern bool may_setgroups(void);
  * same context as task->real_cred.
  */
 struct cred {
-   atomic_tusage;
+   refcount_t  usage;
 #ifdef CONFIG_DEBUG_CREDENTIALS
atomic_tsubscribers;/* number of processes subscribed */
void*put_addr;
@@ -222,7 +222,7 @@ static inline bool cap_ambient_invariant_ok(const struct 
cred *cred)
  */
 static inline struct cred *get_new_cred(struct cred *cred)
 {
-   atomic_inc(&cred->usage);
+   refcount_inc(&cred->usage);
return cred;
 }
 
@@ -262,7 +262,7 @@ static inline void put_cred(const struct cred *_cred)
struct cred *cred = (struct cred *) _cred;
 
validate_creds(cred);
-   if (atomic_dec_and_test(&(cred)->usage))
+   if (refcount_dec_and_test(&(cred)->usage))
__put_cred(cred);
 }
 
diff --git a/kernel/cred.c b/kernel/cred.c
index 9604c1a..86c039a 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -42,7 +42,7 @@ struct group_info init_groups = { .usage = REFCOUNT_INIT(2) };
  * The initial credentials for the initial task
  */
 struct cred init_cred = {
-   .usage  = ATOMIC_INIT(4),
+   .usage  = REFCOUNT_INIT(4),
 #ifdef CONFIG_DEBUG_CREDENTIALS
.subscribers= ATOMIC_INIT(2),
.magic  = CRED_MAGIC,
@@ -101,17 +101,17 @@ static void put_cred_rcu(struct rcu_head *rcu)
 
 #ifdef CONFIG_DEBUG_CREDENTIALS
if (cred->magic != CRED_MAGIC_DEAD ||
-   atomic_read(&cred->usage) != 0 ||
+   refcount_read(&cred->usage) != 0 ||
read_cred_subscribers(cred) != 0)
panic("CRED: put_cred_rcu() sees %p with"
  " mag %x, put %p, usage %d, subscr %d\n",
  cred, cred->magic, cred->put_addr,
- atomic_read(&cred->usage),
+ refcount_read(&cred->usage),
  read_cred_subscribers(cred));
 #else
-   if (atomic_read(&cred->usage) != 0)
+   if (refcount_read(&cred->usage) != 0)
panic("CRED: put_cred_rcu() sees %p with usage %d\n",
- cred, atomic_read(&cred->usage));
+ cred, refcount_read(&cred->usage));
 #endif
 
security_cred_free(cred);
@@ -135,10 +135,10 @@ static void put_cred_rcu(struct rcu_head *rcu)
 void __put_cred(struct cred *cred)
 {
kdebug("__put_cred(%p{%d,%d})", cred,
-  atomic_read(&cred->usage),
+  refcount_read(&cred->usage),
   read_cred_subscribers(cred));
 
-   BUG_ON(atomic_read(&cred->usage) != 0);
+   BUG_ON(refcount_read(&cred->usage) != 0);
 #ifdef CONFIG_DEBUG_CREDENTIALS
BUG_ON(read_cred_subscribers(cred) != 0);
cred->magic = CRED_MAGIC_DEAD;
@@ -159,7 +159,7 @@ void exit_creds(struct task_struct *tsk)
struct cred *cred;
 
kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, 
tsk->cred,
-  atomic_read(&tsk->cred->usage),
+  refcount_read(&tsk->cred->usage),
   read_cred_subscribers(tsk->cred));
 
cred = (struct cred *) tsk->real_cred;
@@ -194,7 +194,7 @@ const struct cred *get_task_cred(struct task_struct *task)
do {
cred = __task_cred((task));
BUG_ON(!cred);
-   } while (!atomic_inc_not_zero(&((struct cred *)cred)->usage));
+   } while (!refcount_inc_not_zero(&((struct cred *)cred)->usage));
 
rcu_read_unlock();
return cred;
@@ -212,7 +212,7 @@ struct cred *cred_alloc_blank(voi

[PATCH 15/15] bdi: convert bdi_writeback_congested.refcnt from atomic_t to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable bdi_writeback_congested.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.
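
Beyond the type change, this patch also moves the increment, presumably
because refcount_t treats incrementing a zero counter as a bug: the old
code left a newly allocated node at 0 and relied on a shared atomic_inc()
at the "found:" label, which would now trip the inc-from-zero check. After
the change the counter is set to 1 when the new node is inserted, and
refcount_inc() is only used for nodes that are already published. Reduced
to its essentials (illustrative):

if (found_existing)
	refcount_inc(&congested->refcnt);		/* already published, count >= 1 */
else {
	refcount_set(&new_congested->refcnt, 1);	/* first reference, set explicitly */
	/* ... link the new node into the tree ... */
}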

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/backing-dev-defs.h |  3 ++-
 include/linux/backing-dev.h  |  4 ++--
 mm/backing-dev.c | 14 --
 3 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index b7c7be6..429fe3b 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -4,6 +4,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -75,7 +76,7 @@ enum wb_reason {
  */
 struct bdi_writeback_congested {
unsigned long state;/* WB_[a]sync_congested flags */
-   atomic_t refcnt;/* nr of attached wb's and blkg */
+   refcount_t refcnt;  /* nr of attached wb's and blkg */
 
 #ifdef CONFIG_CGROUP_WRITEBACK
struct backing_dev_info *__bdi; /* the associated bdi, set to NULL
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e6f5037..f3b38c4 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -401,13 +401,13 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
 static inline struct bdi_writeback_congested *
 wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
 {
-   atomic_inc(&bdi->wb_congested->refcnt);
+   refcount_inc(&bdi->wb_congested->refcnt);
return bdi->wb_congested;
 }
 
 static inline void wb_congested_put(struct bdi_writeback_congested *congested)
 {
-   if (atomic_dec_and_test(&congested->refcnt))
+   if (refcount_dec_and_test(&congested->refcnt))
kfree(congested);
 }
 
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 74b52df..e92a20f 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -440,14 +440,17 @@ wb_congested_get_create(struct backing_dev_info *bdi, int 
blkcg_id, gfp_t gfp)
node = &parent->rb_left;
else if (congested->blkcg_id > blkcg_id)
node = &parent->rb_right;
-   else
-   goto found;
+   else {
+   refcount_inc(&congested->refcnt);
+   goto found;
+   }
}
 
if (new_congested) {
/* !found and storage for new one already allocated, insert */
congested = new_congested;
new_congested = NULL;
+   refcount_set(&congested->refcnt, 1);
rb_link_node(&congested->rb_node, parent, node);
rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
goto found;
@@ -460,13 +463,12 @@ wb_congested_get_create(struct backing_dev_info *bdi, int 
blkcg_id, gfp_t gfp)
if (!new_congested)
return NULL;
 
-   atomic_set(&new_congested->refcnt, 0);
+   refcount_set(&new_congested->refcnt, 0);
new_congested->__bdi = bdi;
new_congested->blkcg_id = blkcg_id;
goto retry;
 
 found:
-   atomic_inc(&congested->refcnt);
spin_unlock_irqrestore(&cgwb_lock, flags);
kfree(new_congested);
return congested;
@@ -483,7 +485,7 @@ void wb_congested_put(struct bdi_writeback_congested 
*congested)
unsigned long flags;
 
local_irq_save(flags);
-   if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
+   if (!refcount_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
local_irq_restore(flags);
return;
}
@@ -793,7 +795,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
if (!bdi->wb_congested)
return -ENOMEM;
 
-   atomic_set(&bdi->wb_congested->refcnt, 1);
+   refcount_set(&bdi->wb_congested->refcnt, 1);
 
err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
if (err) {
-- 
2.7.4



[PATCH 14/15] kcov: convert kcov.refcount to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable kcov.refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 kernel/kcov.c | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/kcov.c b/kernel/kcov.c
index 461c55e..03c174c 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -19,6 +19,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 /* Number of 64-bit words written per one comparison: */
@@ -43,7 +44,7 @@ struct kcov {
 *  - opened file descriptor
 *  - task with enabled coverage (we can't unwire it from another task)
 */
-   atomic_trefcount;
+   refcount_t  refcount;
/* The lock protects mode, size, area and t. */
spinlock_t  lock;
enum kcov_mode  mode;
@@ -227,12 +228,12 @@ EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
 
 static void kcov_get(struct kcov *kcov)
 {
-   atomic_inc(&kcov->refcount);
+   refcount_inc(&kcov->refcount);
 }
 
 static void kcov_put(struct kcov *kcov)
 {
-   if (atomic_dec_and_test(&kcov->refcount)) {
+   if (refcount_dec_and_test(&kcov->refcount)) {
vfree(kcov->area);
kfree(kcov);
}
@@ -310,7 +311,7 @@ static int kcov_open(struct inode *inode, struct file 
*filep)
if (!kcov)
return -ENOMEM;
kcov->mode = KCOV_MODE_DISABLED;
-   atomic_set(&kcov->refcount, 1);
+   refcount_set(&kcov->refcount, 1);
spin_lock_init(&kcov->lock);
filep->private_data = kcov;
return nonseekable_open(inode, filep);
-- 
2.7.4



[PATCH 12/15] groups: convert group_info.usage to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable group_info.usage is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/cred.h | 7 ---
 kernel/cred.c| 2 +-
 kernel/groups.c  | 2 +-
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/include/linux/cred.h b/include/linux/cred.h
index 099058e..00948dd 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -17,6 +17,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -28,7 +29,7 @@ struct inode;
  * COW Supplementary groups list
  */
 struct group_info {
-   atomic_tusage;
+   refcount_t  usage;
int ngroups;
kgid_t  gid[0];
 } __randomize_layout;
@@ -44,7 +45,7 @@ struct group_info {
  */
 static inline struct group_info *get_group_info(struct group_info *gi)
 {
-   atomic_inc(&gi->usage);
+   refcount_inc(&gi->usage);
return gi;
 }
 
@@ -54,7 +55,7 @@ static inline struct group_info *get_group_info(struct 
group_info *gi)
  */
 #define put_group_info(group_info) \
 do {   \
-   if (atomic_dec_and_test(&(group_info)->usage))  \
+   if (refcount_dec_and_test(&(group_info)->usage))\
groups_free(group_info);\
 } while (0)
 
diff --git a/kernel/cred.c b/kernel/cred.c
index 0192a94..9604c1a 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -36,7 +36,7 @@ do {  
\
 static struct kmem_cache *cred_jar;
 
 /* init to 2 - one for init_task, one to ensure it is never freed */
-struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
+struct group_info init_groups = { .usage = REFCOUNT_INIT(2) };
 
 /*
  * The initial credentials for the initial task
diff --git a/kernel/groups.c b/kernel/groups.c
index 434f666..5fc6e21 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -23,7 +23,7 @@ struct group_info *groups_alloc(int gidsetsize)
if (!gi)
return NULL;
 
-   atomic_set(&gi->usage, 1);
+   refcount_set(&gi->usage, 1);
gi->ngroups = gidsetsize;
return gi;
 }
-- 
2.7.4



[PATCH 11/15] nsproxy: convert nsproxy.count to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable nsproxy.count is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/nsproxy.h | 6 +++---
 kernel/nsproxy.c| 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index ac0d65b..f862ba8 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -28,7 +28,7 @@ struct fs_struct;
  * nsproxy is copied.
  */
 struct nsproxy {
-   atomic_t count;
+   refcount_t count;
struct uts_namespace *uts_ns;
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
@@ -74,14 +74,14 @@ int __init nsproxy_cache_init(void);
 
 static inline void put_nsproxy(struct nsproxy *ns)
 {
-   if (atomic_dec_and_test(&ns->count)) {
+   if (refcount_dec_and_test(&ns->count)) {
free_nsproxy(ns);
}
 }
 
 static inline void get_nsproxy(struct nsproxy *ns)
 {
-   atomic_inc(&ns->count);
+   refcount_inc(&ns->count);
 }
 
 #endif
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index f6c5d33..5bfe691 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -31,7 +31,7 @@
 static struct kmem_cache *nsproxy_cachep;
 
 struct nsproxy init_nsproxy = {
-   .count  = ATOMIC_INIT(1),
+   .count  = REFCOUNT_INIT(1),
.uts_ns = &init_uts_ns,
 #if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
.ipc_ns = &init_ipc_ns,
@@ -52,7 +52,7 @@ static inline struct nsproxy *create_nsproxy(void)
 
nsproxy = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL);
if (nsproxy)
-   atomic_set(&nsproxy->count, 1);
+   refcount_set(&nsproxy->count, 1);
return nsproxy;
 }
 
@@ -225,7 +225,7 @@ void switch_task_namespaces(struct task_struct *p, struct 
nsproxy *new)
p->nsproxy = new;
task_unlock(p);
 
-   if (ns && atomic_dec_and_test(&ns->count))
+   if (ns && refcount_dec_and_test(&ns->count))
free_nsproxy(ns);
 }
 
-- 
2.7.4



[PATCH 04/15] sched: convert numa_group.refcount to refcount_t

2017-10-20 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable numa_group.refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 kernel/sched/fair.c | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 27a2241..b4f0eb2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1050,7 +1050,7 @@ unsigned int sysctl_numa_balancing_scan_size = 256;
 unsigned int sysctl_numa_balancing_scan_delay = 1000;
 
 struct numa_group {
-   atomic_t refcount;
+   refcount_t refcount;
 
spinlock_t lock; /* nr_tasks, tasks */
int nr_tasks;
@@ -1119,7 +1119,7 @@ static unsigned int task_scan_start(struct task_struct *p)
unsigned long shared = group_faults_shared(ng);
unsigned long private = group_faults_priv(ng);
 
-   period *= atomic_read(&ng->refcount);
+   period *= refcount_read(&ng->refcount);
period *= shared + 1;
period /= private + shared + 1;
}
@@ -1142,7 +1142,7 @@ static unsigned int task_scan_max(struct task_struct *p)
unsigned long private = group_faults_priv(ng);
unsigned long period = smax;
 
-   period *= atomic_read(&ng->refcount);
+   period *= refcount_read(&ng->refcount);
period *= shared + 1;
period /= private + shared + 1;
 
@@ -2227,12 +2227,12 @@ static void task_numa_placement(struct task_struct *p)
 
 static inline int get_numa_group(struct numa_group *grp)
 {
-   return atomic_inc_not_zero(&grp->refcount);
+   return refcount_inc_not_zero(&grp->refcount);
 }
 
 static inline void put_numa_group(struct numa_group *grp)
 {
-   if (atomic_dec_and_test(&grp->refcount))
+   if (refcount_dec_and_test(&grp->refcount))
kfree_rcu(grp, rcu);
 }
 
@@ -2253,7 +2253,7 @@ static void task_numa_group(struct task_struct *p, int 
cpupid, int flags,
if (!grp)
return;
 
-   atomic_set(&grp->refcount, 1);
+   refcount_set(&grp->refcount, 1);
grp->active_nodes = 1;
grp->max_faults_cpu = 0;
spin_lock_init(&grp->lock);
-- 
2.7.4
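
The interesting half of this conversion is the lockless lookup: get_numa_group() is called on an object found under rcu_read_lock(), and refcount_inc_not_zero() is what keeps that safe, since it refuses to take a reference once the count has already dropped to zero. A rough sketch of the pattern, using a hypothetical struct bar rather than the scheduler code:

struct bar {
	refcount_t ref;
	struct rcu_head rcu;
};

static struct bar *bar_lookup(struct bar __rcu **slot)
{
	struct bar *b;

	rcu_read_lock();
	b = rcu_dereference(*slot);
	/* only pin the object if its last reference has not gone away yet */
	if (b && !refcount_inc_not_zero(&b->ref))
		b = NULL;
	rcu_read_unlock();

	return b;
}

static void bar_put(struct bar *b)
{
	if (refcount_dec_and_test(&b->ref))
		kfree_rcu(b, rcu);	/* readers may still be in flight */
}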



[PATCH] refcount: provide same memory ordering guarantees as in atomic_t

2017-10-23 Thread Elena Reshetova
Currently the arch-independent implementation of refcount_t in
lib/refcount.c provides weaker memory ordering guarantees
than its analogous atomic_t operations.
While this should not be a problem for most actual uses of
refcounters, it is more understandable for everyone (and less
error-prone for future users) to provide exactly the same
memory ordering guarantees as the atomics.

If speed is a concern, then either a more efficient
arch-dependent refcount_t implementation should be used, or,
if there are enough users in the future, we might need to
provide both strict and relaxed refcount_t APIs.

Suggested-by: Kees Cook 
Signed-off-by: Elena Reshetova 
---
 lib/refcount.c | 71 +-
 1 file changed, 6 insertions(+), 65 deletions(-)

diff --git a/lib/refcount.c b/lib/refcount.c
index 5d0582a..cc6946e 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -8,29 +8,7 @@
  * there. This avoids wrapping the counter and causing 'spurious'
  * use-after-free issues.
  *
- * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
- * and provide only what is strictly required for refcounts.
- *
- * The increments are fully relaxed; these will not provide ordering. The
- * rationale is that whatever is used to obtain the object we're increasing the
- * reference count on will provide the ordering. For locked data structures,
- * its the lock acquire, for RCU/lockless data structures its the dependent
- * load.
- *
- * Do note that inc_not_zero() provides a control dependency which will order
- * future stores against the inc, this ensures we'll never modify the object
- * if we did not in fact acquire a reference.
- *
- * The decrements will provide release order, such that all the prior loads and
- * stores will be issued before, it also provides a control dependency, which
- * will order us against the subsequent free().
- *
- * The control dependency is against the load of the cmpxchg (ll/sc) that
- * succeeded. This means the stores aren't fully ordered, but this is fine
- * because the 1->0 transition indicates no concurrency.
- *
- * Note that the allocator is responsible for ordering things between free()
- * and alloc().
+ * Memory ordering rules are exactly the same as with regular atomic_t 
functions
  *
  */
 
@@ -46,10 +24,6 @@
  *
  * Will saturate at UINT_MAX and WARN.
  *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- *
  * Use of this function is not recommended for the normal reference counting
  * use case in which references are taken and released one at a time.  In these
  * cases, refcount_inc(), or one of its variants, should instead be used to
@@ -72,7 +46,7 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
if (new < val)
new = UINT_MAX;
 
-   } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
+   } while (!atomic_try_cmpxchg(&r->refs, &val, new));
 
WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -87,10 +61,6 @@ EXPORT_SYMBOL(refcount_add_not_zero);
  *
  * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
  *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- *
  * Use of this function is not recommended for the normal reference counting
  * use case in which references are taken and released one at a time.  In these
  * cases, refcount_inc(), or one of its variants, should instead be used to
@@ -108,10 +78,6 @@ EXPORT_SYMBOL(refcount_add);
  *
  * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
  *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- *
  * Return: true if the increment was successful, false otherwise
  */
 bool refcount_inc_not_zero(refcount_t *r)
@@ -127,7 +93,7 @@ bool refcount_inc_not_zero(refcount_t *r)
if (unlikely(!new))
return true;
 
-   } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
+   } while (!atomic_try_cmpxchg(&r->refs, &val, new));
 
WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -141,9 +107,6 @@ EXPORT_SYMBOL(refcount_inc_not_zero);
  *
  * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
  *
- * Provides no memory ordering, it is assumed the caller already has a
- * reference on the object.
- *
  * Will WARN if the re
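
For readers without the full patch in front of them, the hunks above replace atomic_try_cmpxchg_relaxed() with atomic_try_cmpxchg() in loops of the following shape (a paraphrase of post-patch lib/refcount.c, not a verbatim quote), so that a successful refcount update is now fully ordered, just as the equivalent atomic_t operation would be:

static bool example_add_not_zero(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (!val)
			return false;		/* already dead, don't resurrect */

		if (unlikely(val == UINT_MAX))
			return true;		/* already saturated, stays there */

		new = val + i;
		if (new < val)
			new = UINT_MAX;		/* saturate instead of wrapping */

	} while (!atomic_try_cmpxchg(&r->refs, &val, new));	/* was _relaxed */

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
	return true;
}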

[PATCH 01/16] futex: convert futex_pi_state.refcount to refcount_t

2017-11-15 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable futex_pi_state.refcount is used as a pure
reference counter. Convert it to refcount_t and fix up
the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be in a state to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't have some undocumented
memory guarantees for this variable usage.

For the futex_pi_state.refcount it might make a difference
in following places:
 - get_pi_state() and exit_pi_state_list(): increment in
   refcount_inc_not_zero() only guarantees control dependency
   on success vs. fully ordered atomic counterpart
 - put_pi_state(): decrement in refcount_dec_and_test() only
   provides RELEASE ordering and control dependency on success
   vs. fully ordered atomic counterpart

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 kernel/futex.c | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/kernel/futex.c b/kernel/futex.c
index 76ed592..907055f 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -67,6 +67,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 
@@ -209,7 +210,7 @@ struct futex_pi_state {
struct rt_mutex pi_mutex;
 
struct task_struct *owner;
-   atomic_t refcount;
+   refcount_t refcount;
 
union futex_key key;
 } __randomize_layout;
@@ -795,7 +796,7 @@ static int refill_pi_state_cache(void)
INIT_LIST_HEAD(&pi_state->list);
/* pi_mutex gets initialized later */
pi_state->owner = NULL;
-   atomic_set(&pi_state->refcount, 1);
+   refcount_set(&pi_state->refcount, 1);
pi_state->key = FUTEX_KEY_INIT;
 
current->pi_state_cache = pi_state;
@@ -815,7 +816,7 @@ static struct futex_pi_state *alloc_pi_state(void)
 
 static void get_pi_state(struct futex_pi_state *pi_state)
 {
-   WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount));
+   WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
 }
 
 /*
@@ -827,7 +828,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
if (!pi_state)
return;
 
-   if (!atomic_dec_and_test(&pi_state->refcount))
+   if (!refcount_dec_and_test(&pi_state->refcount))
return;
 
/*
@@ -857,7 +858,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
 * refcount is at 0 - put it back to 1.
 */
pi_state->owner = NULL;
-   atomic_set(&pi_state->refcount, 1);
+   refcount_set(&pi_state->refcount, 1);
current->pi_state_cache = pi_state;
}
 }
@@ -918,7 +919,7 @@ void exit_pi_state_list(struct task_struct *curr)
 * In that case; drop the locks to let put_pi_state() make
 * progress and retry the loop.
 */
-   if (!atomic_inc_not_zero(&pi_state->refcount)) {
+   if (!refcount_inc_not_zero(&pi_state->refcount)) {
raw_spin_unlock_irq(&curr->pi_lock);
cpu_relax();
raw_spin_lock_irq(&curr->pi_lock);
@@ -1074,7 +1075,7 @@ static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
 * free pi_state before we can take a reference ourselves.
 */
-   WARN_ON(!atomic_read(&pi_state->refcount));
+   WARN_ON(!refcount_read(&pi_state->refcount));
 
/*
 * Now that we have a pi_state, we can acquire wait_lock
-- 
2.7.4
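
One hunk in this patch deserves a closer look: put_pi_state() can park a pi_state in the per-task cache with its count at zero and later hand it out again, re-armed via refcount_set(..., 1). The re-arm has to be a set rather than an increment, because refcount_inc() from zero is precisely the pattern refcount_t treats as object resurrection and warns about. A simplified sketch of that cache-and-reuse idiom, with a hypothetical struct obj and an explicit cache slot instead of current->pi_state_cache:

struct obj {
	refcount_t ref;
	/* ... */
};

static void obj_put(struct obj *o, struct obj **cache)
{
	if (!refcount_dec_and_test(&o->ref))
		return;

	if (!*cache) {
		/* keep it for reuse: the count is 0 now, so re-arm with set */
		refcount_set(&o->ref, 1);
		*cache = o;
	} else {
		kfree(o);
	}
}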



[PATCH 02/16] sched: convert sighand_struct.count to refcount_t

2017-11-15 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable sighand_struct.count is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be in a state to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't have some undocumented
memory guarantees for this variable usage.

For the sighand_struct.count it might make a difference
in following places:
 - __cleanup_sighand: decrement in refcount_dec_and_test() only
   provides RELEASE ordering and control dependency on success
   vs. fully ordered atomic counterpart

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 fs/exec.c| 4 ++--
 fs/proc/task_nommu.c | 2 +-
 include/linux/init_task.h| 2 +-
 include/linux/sched/signal.h | 3 ++-
 kernel/fork.c| 8 
 5 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index 19e6325..09d99b5 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1181,7 +1181,7 @@ static int de_thread(struct task_struct *tsk)
flush_itimer_signals();
 #endif
 
-   if (atomic_read(&oldsighand->count) != 1) {
+   if (refcount_read(&oldsighand->count) != 1) {
struct sighand_struct *newsighand;
/*
 * This ->sighand is shared with the CLONE_SIGHAND
@@ -1191,7 +1191,7 @@ static int de_thread(struct task_struct *tsk)
if (!newsighand)
return -ENOMEM;
 
-   atomic_set(&newsighand->count, 1);
+   refcount_set(&newsighand->count, 1);
memcpy(newsighand->action, oldsighand->action,
   sizeof(newsighand->action));
 
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 0b60ac6..684f808 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -64,7 +64,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
else
bytes += kobjsize(current->files);
 
-   if (current->sighand && atomic_read(&current->sighand->count) > 1)
+   if (current->sighand && refcount_read(&current->sighand->count) > 1)
sbytes += kobjsize(current->sighand);
else
bytes += kobjsize(current->sighand);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 6a53262..9eb2ce8 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -86,7 +86,7 @@ extern struct fs_struct init_fs;
 extern struct nsproxy init_nsproxy;
 
 #define INIT_SIGHAND(sighand) {
\
-   .count  = ATOMIC_INIT(1),   \
+   .count  = REFCOUNT_INIT(1), \
.action = { { { .sa_handler = SIG_DFL, } }, },  \
.siglock= __SPIN_LOCK_UNLOCKED(sighand.siglock),\
.signalfd_wqh   = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh),  
\
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 64d85fc..4a0e2d8 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -8,13 +8,14 @@
 #include 
 #include 
 #include 
+#include 
 
 /*
  * Types defining task->signal and task->sighand and APIs using them:
  */
 
 struct sighand_struct {
-   atomic_tcount;
+   refcount_t  count;
struct k_sigaction  action[_NSIG];
spinlock_t  siglock;
wait_queue_head_t   signalfd_wqh;
diff --git a/kernel/fork.c b/kernel/fork.c
index a1db74e..be451af 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1381,7 +1381,7 @@ static int copy_sighand(unsigned long clone_flags, struct 
task_struct *tsk)
struct sighand_struct *sig;
 
if (clone_flags & CLONE_SIGHAND) {
-   atomic_inc(&current->si
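
Besides the plain get/put conversion, this patch turns the de_thread() check into refcount_read(&oldsighand->count) != 1, i.e. "is anyone else still sharing this?", and unshares if so. A stripped-down sketch of that unshare-if-shared shape with a hypothetical shared object; the real code performs the final swap under the appropriate locks, which are omitted here:

struct shared_thing {
	refcount_t count;
	/* ... shared state ... */
};

static int unshare_if_shared(struct shared_thing **slot)
{
	struct shared_thing *old = *slot, *new;

	if (refcount_read(&old->count) == 1)
		return 0;			/* sole owner, nothing to do */

	new = kmemdup(old, sizeof(*old), GFP_KERNEL);	/* private copy */
	if (!new)
		return -ENOMEM;

	refcount_set(&new->count, 1);		/* the private copy has one owner */
	*slot = new;				/* real code swaps this under locks */

	if (refcount_dec_and_test(&old->count))	/* drop our ref on the original */
		kfree(old);
	return 0;
}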

[PATCH 05/16] sched: convert numa_group.refcount to refcount_t

2017-11-15 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable numa_group.refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be in a state to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't have some undocumented
memory guarantees for this variable usage.

For the numa_group.refcount it might make a difference
in following places:
 - get_numa_group(): increment in refcount_inc_not_zero() only
   guarantees control dependency on success vs. fully ordered
   atomic counterpart
 - put_numa_group(): decrement in refcount_dec_and_test() only
   provides RELEASE ordering and control dependency on success
   vs. fully ordered atomic counterpart

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 kernel/sched/fair.c | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4037e19..b456b94 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1052,7 +1052,7 @@ unsigned int sysctl_numa_balancing_scan_size = 256;
 unsigned int sysctl_numa_balancing_scan_delay = 1000;
 
 struct numa_group {
-   atomic_t refcount;
+   refcount_t refcount;
 
spinlock_t lock; /* nr_tasks, tasks */
int nr_tasks;
@@ -1121,7 +1121,7 @@ static unsigned int task_scan_start(struct task_struct *p)
unsigned long shared = group_faults_shared(ng);
unsigned long private = group_faults_priv(ng);
 
-   period *= atomic_read(&ng->refcount);
+   period *= refcount_read(&ng->refcount);
period *= shared + 1;
period /= private + shared + 1;
}
@@ -1144,7 +1144,7 @@ static unsigned int task_scan_max(struct task_struct *p)
unsigned long private = group_faults_priv(ng);
unsigned long period = smax;
 
-   period *= atomic_read(&ng->refcount);
+   period *= refcount_read(&ng->refcount);
period *= shared + 1;
period /= private + shared + 1;
 
@@ -2229,12 +2229,12 @@ static void task_numa_placement(struct task_struct *p)
 
 static inline int get_numa_group(struct numa_group *grp)
 {
-   return atomic_inc_not_zero(&grp->refcount);
+   return refcount_inc_not_zero(&grp->refcount);
 }
 
 static inline void put_numa_group(struct numa_group *grp)
 {
-   if (atomic_dec_and_test(&grp->refcount))
+   if (refcount_dec_and_test(&grp->refcount))
kfree_rcu(grp, rcu);
 }
 
@@ -2255,7 +2255,7 @@ static void task_numa_group(struct task_struct *p, int 
cpupid, int flags,
if (!grp)
return;
 
-   atomic_set(&grp->refcount, 1);
+   refcount_set(&grp->refcount, 1);
grp->active_nodes = 1;
grp->max_faults_cpu = 0;
spin_lock_init(&grp->lock);
-- 
2.7.4



[PATCH 04/16] sched: convert user_struct.__count to refcount_t

2017-11-15 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable user_struct.__count is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be in a state to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't have some undocumented
memory guarantees for this variable usage.

For the user_struct.__count it might make a difference
in following places:
 - free_uid(): decrement in refcount_dec_and_lock() only
   provides RELEASE ordering, control dependency on success
   and will hold a spin lock on success vs. fully ordered
   atomic counterpart. Note there are no changes in spin lock
   locking here.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/sched/user.h | 5 +++--
 kernel/user.c  | 8 
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 0dcf4e4..2ca7cf4 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -4,6 +4,7 @@
 
 #include 
 #include 
+#include 
 
 struct key;
 
@@ -11,7 +12,7 @@ struct key;
  * Some day this will be a full-fledged user tracking system..
  */
 struct user_struct {
-   atomic_t __count;   /* reference count */
+   refcount_t __count; /* reference count */
atomic_t processes; /* How many processes does this user have? */
atomic_t sigpending;/* How many pending signals does this user 
have? */
 #ifdef CONFIG_FANOTIFY
@@ -55,7 +56,7 @@ extern struct user_struct root_user;
 extern struct user_struct * alloc_uid(kuid_t);
 static inline struct user_struct *get_uid(struct user_struct *u)
 {
-   atomic_inc(&u->__count);
+   refcount_inc(&u->__count);
return u;
 }
 extern void free_uid(struct user_struct *);
diff --git a/kernel/user.c b/kernel/user.c
index 9a20acc..f104474 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -96,7 +96,7 @@ static DEFINE_SPINLOCK(uidhash_lock);
 
 /* root_user.__count is 1, for init task cred */
 struct user_struct root_user = {
-   .__count= ATOMIC_INIT(1),
+   .__count= REFCOUNT_INIT(1),
.processes  = ATOMIC_INIT(1),
.sigpending = ATOMIC_INIT(0),
.locked_shm = 0,
@@ -122,7 +122,7 @@ static struct user_struct *uid_hash_find(kuid_t uid, struct 
hlist_head *hashent)
 
hlist_for_each_entry(user, hashent, uidhash_node) {
if (uid_eq(user->uid, uid)) {
-   atomic_inc(&user->__count);
+   refcount_inc(&user->__count);
return user;
}
}
@@ -169,7 +169,7 @@ void free_uid(struct user_struct *up)
return;
 
local_irq_save(flags);
-   if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+   if (refcount_dec_and_lock(&up->__count, &uidhash_lock))
free_user(up, flags);
else
local_irq_restore(flags);
@@ -190,7 +190,7 @@ struct user_struct *alloc_uid(kuid_t uid)
goto out_unlock;
 
new->uid = uid;
-   atomic_set(&new->__count, 1);
+   refcount_set(&new->__count, 1);
 
/*
 * Before adding this, check whether we raced
-- 
2.7.4
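
free_uid() is the one place in this patch that needs refcount_dec_and_lock(): dropping the last reference and unhashing the user_struct must appear atomic to uid_hash_find(), so the helper takes the hash lock only when the count actually falls to zero. A sketch of the idiom with a hypothetical object and lock (the real free_uid() also saves and restores IRQ flags, omitted here):

struct thing {
	refcount_t ref;
	struct hlist_node node;
};

static DEFINE_SPINLOCK(thing_lock);

static void thing_put(struct thing *t)
{
	/* true only on the 1->0 transition, and then thing_lock is held */
	if (refcount_dec_and_lock(&t->ref, &thing_lock)) {
		hlist_del(&t->node);	/* lookups hold the same lock */
		spin_unlock(&thing_lock);
		kfree(t);
	}
}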



[PATCH 10/16] perf/ring_buffer: convert ring_buffer.aux_refcount to refcount_t

2017-11-15 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable ring_buffer.aux_refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be in a state to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't have some undocumented
memory guarantees for this variable usage.

For the ring_buffer.aux_refcount it might make a difference
in following places:
 - perf_aux_output_begin(): increment in refcount_inc_not_zero() only
   guarantees control dependency on success vs. fully ordered
   atomic counterpart
 - rb_free_aux(): decrement in refcount_dec_and_test() only
   provides RELEASE ordering and control dependency on success
   vs. fully ordered atomic counterpart

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 kernel/events/core.c| 2 +-
 kernel/events/internal.h| 2 +-
 kernel/events/ring_buffer.c | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3497c6a..5f087f4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5095,7 +5095,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 
/* this has to be the last one */
rb_free_aux(rb);
-   WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
+   WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
 
mutex_unlock(&event->mmap_mutex);
}
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 86c5c7f..50ecf00 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -49,7 +49,7 @@ struct ring_buffer {
atomic_taux_mmap_count;
unsigned long   aux_mmap_locked;
void(*free_aux)(void *);
-   atomic_taux_refcount;
+   refcount_t  aux_refcount;
void**aux_pages;
void*aux_priv;
 
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index de12d36..b29d6ce 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -357,7 +357,7 @@ void *perf_aux_output_begin(struct perf_output_handle 
*handle,
if (!atomic_read(&rb->aux_mmap_count))
goto err;
 
-   if (!atomic_inc_not_zero(&rb->aux_refcount))
+   if (!refcount_inc_not_zero(&rb->aux_refcount))
goto err;
 
/*
@@ -659,7 +659,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event 
*event,
 * we keep a refcount here to make sure either of the two can
 * reference them safely.
 */
-   atomic_set(&rb->aux_refcount, 1);
+   refcount_set(&rb->aux_refcount, 1);
 
rb->aux_overwrite = overwrite;
rb->aux_watermark = watermark;
@@ -678,7 +678,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event 
*event,
 
 void rb_free_aux(struct ring_buffer *rb)
 {
-   if (atomic_dec_and_test(&rb->aux_refcount))
+   if (refcount_dec_and_test(&rb->aux_refcount))
__rb_free_aux(rb);
 }
 
-- 
2.7.4



[PATCH 03/16] sched: convert signal_struct.sigcnt to refcount_t

2017-11-15 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable signal_struct.sigcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be in a state to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't have some undocumented
memory guarantees for this variable usage.

For the signal_struct.sigcnt it might make a difference
in following places:
 - put_signal_struct(): decrement in refcount_dec_and_test() only
   provides RELEASE ordering and control dependency on success
   vs. fully ordered atomic counterpart

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/sched/signal.h | 2 +-
 kernel/fork.c| 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 4a0e2d8..14e3a0c 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -78,7 +78,7 @@ struct thread_group_cputimer {
  * the locking of signal_struct.
  */
 struct signal_struct {
-   atomic_tsigcnt;
+   refcount_t  sigcnt;
atomic_tlive;
int nr_threads;
struct list_headthread_head;
diff --git a/kernel/fork.c b/kernel/fork.c
index be451af..a65ec7d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -642,7 +642,7 @@ static inline void free_signal_struct(struct signal_struct 
*sig)
 
 static inline void put_signal_struct(struct signal_struct *sig)
 {
-   if (atomic_dec_and_test(&sig->sigcnt))
+   if (refcount_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
 }
 
@@ -1443,7 +1443,7 @@ static int copy_signal(unsigned long clone_flags, struct 
task_struct *tsk)
 
sig->nr_threads = 1;
atomic_set(&sig->live, 1);
-   atomic_set(&sig->sigcnt, 1);
+   refcount_set(&sig->sigcnt, 1);
 
/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
@@ -1952,7 +1952,7 @@ static __latent_entropy struct task_struct *copy_process(
} else {
current->signal->nr_threads++;
atomic_inc(&current->signal->live);
-   atomic_inc(&current->signal->sigcnt);
+   refcount_inc(&current->signal->sigcnt);
list_add_tail_rcu(&p->thread_group,
  &p->group_leader->thread_group);
list_add_tail_rcu(&p->thread_node,
-- 
2.7.4



[PATCH 09/16] perf/ring_buffer: convert ring_buffer.refcount to refcount_t

2017-11-15 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable ring_buffer.refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be in a state to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't have some undocumented
memory guarantees for this variable usage.

For the ring_buffer.refcount it might make a difference
in following places:
 - ring_buffer_get(): increment in refcount_inc_not_zero() only
   guarantees control dependency on success vs. fully ordered
   atomic counterpart
 - ring_buffer_put(): decrement in refcount_dec_and_test() only
   provides RELEASE ordering and control dependency on success
   vs. fully ordered atomic counterpart

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 kernel/events/core.c| 4 ++--
 kernel/events/internal.h| 3 ++-
 kernel/events/ring_buffer.c | 2 +-
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 29c381f..3497c6a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5020,7 +5020,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event 
*event)
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
-   if (!atomic_inc_not_zero(&rb->refcount))
+   if (!refcount_inc_not_zero(&rb->refcount))
rb = NULL;
}
rcu_read_unlock();
@@ -5030,7 +5030,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event 
*event)
 
 void ring_buffer_put(struct ring_buffer *rb)
 {
-   if (!atomic_dec_and_test(&rb->refcount))
+   if (!refcount_dec_and_test(&rb->refcount))
return;
 
WARN_ON_ONCE(!list_empty(&rb->event_list));
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 09b1537..86c5c7f 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -4,13 +4,14 @@
 
 #include 
 #include 
+#include 
 
 /* Buffer handling */
 
 #define RING_BUFFER_WRITABLE   0x01
 
 struct ring_buffer {
-   atomic_trefcount;
+   refcount_t  refcount;
struct rcu_head rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
struct work_struct  work;
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 141aa2c..de12d36 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -284,7 +284,7 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, 
int flags)
else
rb->overwrite = 1;
 
-   atomic_set(&rb->refcount, 1);
+   refcount_set(&rb->refcount, 1);
 
INIT_LIST_HEAD(&rb->event_list);
spin_lock_init(&rb->event_lock);
-- 
2.7.4
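
The ordering caveat for ring_buffer_put() is easier to see with the reasoning written next to the code. The kernel's own rationale (from the lib/refcount.c comments quoted earlier in this thread) is that release ordering on the decrement pushes all of the releasing CPU's prior loads and stores before it, the control dependency of the successful test orders the subsequent free after it, and the 1->0 transition means nobody else holds a reference, so full ordering is not required. A commented sketch on a hypothetical buffer object:

struct buf {
	refcount_t ref;
	/* ... */
};

static void buf_put(struct buf *b)
{
	/*
	 * RELEASE: every load/store this CPU made to *b is issued before
	 * the decrement, so nothing of ours can leak past a 1->0 drop.
	 */
	if (!refcount_dec_and_test(&b->ref))
		return;

	/*
	 * Control dependency: the free below cannot be reordered before
	 * the successful test, and 1->0 means no concurrency is left,
	 * which is why ordering weaker than atomic_dec_and_test() is
	 * still sufficient here.
	 */
	kfree(b);
}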



[PATCH 13/16] groups: convert group_info.usage to refcount_t

2017-11-15 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable group_info.usage is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be in a state to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't have some undocumented
memory guarantees for this variable usage.

For the group_info.usage it might make a difference
in following places:
 - put_group_info(): decrement in refcount_dec_and_test() only
   provides RELEASE ordering and control dependency on success
   vs. fully ordered atomic counterpart

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/cred.h | 7 ---
 kernel/cred.c| 2 +-
 kernel/groups.c  | 2 +-
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/include/linux/cred.h b/include/linux/cred.h
index 099058e..00948dd 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -17,6 +17,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -28,7 +29,7 @@ struct inode;
  * COW Supplementary groups list
  */
 struct group_info {
-   atomic_tusage;
+   refcount_t  usage;
int ngroups;
kgid_t  gid[0];
 } __randomize_layout;
@@ -44,7 +45,7 @@ struct group_info {
  */
 static inline struct group_info *get_group_info(struct group_info *gi)
 {
-   atomic_inc(&gi->usage);
+   refcount_inc(&gi->usage);
return gi;
 }
 
@@ -54,7 +55,7 @@ static inline struct group_info *get_group_info(struct 
group_info *gi)
  */
 #define put_group_info(group_info) \
 do {   \
-   if (atomic_dec_and_test(&(group_info)->usage))  \
+   if (refcount_dec_and_test(&(group_info)->usage))\
groups_free(group_info);\
 } while (0)
 
diff --git a/kernel/cred.c b/kernel/cred.c
index 0192a94..9604c1a 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -36,7 +36,7 @@ do {  
\
 static struct kmem_cache *cred_jar;
 
 /* init to 2 - one for init_task, one to ensure it is never freed */
-struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
+struct group_info init_groups = { .usage = REFCOUNT_INIT(2) };
 
 /*
  * The initial credentials for the initial task
diff --git a/kernel/groups.c b/kernel/groups.c
index e357bc8..2ab0e56 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -24,7 +24,7 @@ struct group_info *groups_alloc(int gidsetsize)
if (!gi)
return NULL;
 
-   atomic_set(&gi->usage, 1);
+   refcount_set(&gi->usage, 1);
gi->ngroups = gidsetsize;
return gi;
 }
-- 
2.7.4
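
A side note on the init_groups hunk: REFCOUNT_INIT() is not limited to 1. init_groups starts at 2, one reference for init_task and one that is deliberately never dropped, so refcount_dec_and_test() can never reach zero and the statically allocated object can never be handed to groups_free(). The same trick on a hypothetical static default object:

struct cfg {
	refcount_t usage;
	/* ... */
};

/* one ref for the initial user, one permanent ref so this is never freed */
static struct cfg default_cfg = {
	.usage = REFCOUNT_INIT(2),
};

static void cfg_put(struct cfg *c)
{
	/* for &default_cfg the count can only ever fall back to 1 */
	if (refcount_dec_and_test(&c->usage))
		kfree(c);
}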



[PATCH 15/16] kcov: convert kcov.refcount to refcount_t

2017-11-15 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable kcov.refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be in a state to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't have some undocumented
memory guarantees for this variable usage.

For the kcov.refcount it might make a difference
in following places:
 - kcov_put(): decrement in refcount_dec_and_test() only
   provides RELEASE ordering and control dependency on success
   vs. fully ordered atomic counterpart

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 kernel/kcov.c | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/kcov.c b/kernel/kcov.c
index 15f33fa..343288c 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -20,6 +20,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 /* Number of 64-bit words written per one comparison: */
@@ -44,7 +45,7 @@ struct kcov {
 *  - opened file descriptor
 *  - task with enabled coverage (we can't unwire it from another task)
 */
-   atomic_trefcount;
+   refcount_t  refcount;
/* The lock protects mode, size, area and t. */
spinlock_t  lock;
enum kcov_mode  mode;
@@ -228,12 +229,12 @@ EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
 
 static void kcov_get(struct kcov *kcov)
 {
-   atomic_inc(&kcov->refcount);
+   refcount_inc(&kcov->refcount);
 }
 
 static void kcov_put(struct kcov *kcov)
 {
-   if (atomic_dec_and_test(&kcov->refcount)) {
+   if (refcount_dec_and_test(&kcov->refcount)) {
vfree(kcov->area);
kfree(kcov);
}
@@ -311,7 +312,7 @@ static int kcov_open(struct inode *inode, struct file 
*filep)
if (!kcov)
return -ENOMEM;
kcov->mode = KCOV_MODE_DISABLED;
-   atomic_set(&kcov->refcount, 1);
+   refcount_set(&kcov->refcount, 1);
spin_lock_init(&kcov->lock);
filep->private_data = kcov;
return nonseekable_open(inode, filep);
-- 
2.7.4



[PATCH 16/16] bdi: convert bdi_writeback_congested.refcnt from atomic_t to refcount_t

2017-11-15 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable bdi_writeback_congested.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be in a state to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't have some undocumented
memory guarantees for this variable usage.

For the bdi_writeback_congested.refcnt it might make a difference
in following places:
 - wb_congested_put() in include/linux/backing-dev.h:
   decrement in refcount_dec_and_test() only
   provides RELEASE ordering and control dependency on success
   vs. fully ordered atomic counterpart
 - wb_congested_put() in mm/backing-dev.c: decrement in
   refcount_dec_and_lock() only
   provides RELEASE ordering, control dependency on success
   and hold spin_lock() on success vs. fully ordered atomic
   counterpart. Note, there is no difference in spin lock
   locking.

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/backing-dev-defs.h |  3 ++-
 include/linux/backing-dev.h  |  4 ++--
 mm/backing-dev.c | 14 --
 3 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index bfe86b5..0b1bcce 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -5,6 +5,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -76,7 +77,7 @@ enum wb_reason {
  */
 struct bdi_writeback_congested {
unsigned long state;/* WB_[a]sync_congested flags */
-   atomic_t refcnt;/* nr of attached wb's and blkg */
+   refcount_t refcnt;  /* nr of attached wb's and blkg */
 
 #ifdef CONFIG_CGROUP_WRITEBACK
struct backing_dev_info *__bdi; /* the associated bdi, set to NULL
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e54e7e0..d6bac2e 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -402,13 +402,13 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
 static inline struct bdi_writeback_congested *
 wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
 {
-   atomic_inc(&bdi->wb_congested->refcnt);
+   refcount_inc(&bdi->wb_congested->refcnt);
return bdi->wb_congested;
 }
 
 static inline void wb_congested_put(struct bdi_writeback_congested *congested)
 {
-   if (atomic_dec_and_test(&congested->refcnt))
+   if (refcount_dec_and_test(&congested->refcnt))
kfree(congested);
 }
 
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 74b52df..e92a20f 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -440,14 +440,17 @@ wb_congested_get_create(struct backing_dev_info *bdi, int 
blkcg_id, gfp_t gfp)
node = &parent->rb_left;
else if (congested->blkcg_id > blkcg_id)
node = &parent->rb_right;
-   else
-   goto found;
+   else {
+   refcount_inc(&congested->refcnt);
+   goto found;
+   }
}
 
if (new_congested) {
/* !found and storage for new one already allocated, insert */
congested = new_congested;
new_congested = NULL;
+   refcount_set(&congested->refcnt, 1);
rb_link_node(&congested->rb_node, parent, node);
rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
goto found;
@@ -460,13 +463,12 @@ wb_congested_get_create(struct backing_dev_info *bdi, int 
blkcg_id, gfp_t gfp)
if (!new_congested)
return NULL;
 
-   atomic_set(&new_congested->refcnt, 0);
+   
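
This is the one conversion in the series that is not one-for-one. The old code initialized new_congested->refcnt to 0 and relied on an unconditional atomic_inc() that both the "found" and the "newly inserted" paths went through, so the counter legitimately idled at zero. refcount_t forbids that, since an increment from zero looks like object resurrection and warns, which is why the patch moves the increment into the found branch and arms a freshly inserted node straight to 1. A sketch of the resulting shape with a hypothetical keyed cache (the caller is assumed to hold the list lock):

struct cnode {
	refcount_t refcnt;
	int key;
	struct list_head list;
};

static LIST_HEAD(cnode_list);

static struct cnode *cnode_get_or_insert(struct cnode *new, int key)
{
	struct cnode *c;

	list_for_each_entry(c, &cnode_list, list) {
		if (c->key == key) {
			refcount_inc(&c->refcnt);	/* found: take a ref */
			return c;
		}
	}

	/*
	 * A reachable refcount_t must never sit at 0, so the new node is
	 * armed to 1 before it is inserted, instead of being set to 0 and
	 * bumped later as the atomic_t version did.
	 */
	refcount_set(&new->refcnt, 1);
	new->key = key;
	list_add(&new->list, &cnode_list);
	return new;
}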

[PATCH 14/16] creds: convert cred.usage to refcount_t

2017-11-15 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable cred.usage is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be in a state to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't have some undocumented
memory guarantees for this variable usage.

For the cred.usage it might make a difference
in following places:
 - get_task_cred(): increment in refcount_inc_not_zero() only
   guarantees control dependency on success vs. fully ordered
   atomic counterpart
 - put_cred(): decrement in refcount_dec_and_test() only
   provides RELEASE ordering and control dependency on success
   vs. fully ordered atomic counterpart

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/cred.h |  6 +++---
 kernel/cred.c| 44 ++--
 2 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/include/linux/cred.h b/include/linux/cred.h
index 00948dd..a9f217b 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -109,7 +109,7 @@ extern bool may_setgroups(void);
  * same context as task->real_cred.
  */
 struct cred {
-   atomic_tusage;
+   refcount_t  usage;
 #ifdef CONFIG_DEBUG_CREDENTIALS
atomic_tsubscribers;/* number of processes subscribed */
void*put_addr;
@@ -222,7 +222,7 @@ static inline bool cap_ambient_invariant_ok(const struct 
cred *cred)
  */
 static inline struct cred *get_new_cred(struct cred *cred)
 {
-   atomic_inc(&cred->usage);
+   refcount_inc(&cred->usage);
return cred;
 }
 
@@ -262,7 +262,7 @@ static inline void put_cred(const struct cred *_cred)
struct cred *cred = (struct cred *) _cred;
 
validate_creds(cred);
-   if (atomic_dec_and_test(&(cred)->usage))
+   if (refcount_dec_and_test(&(cred)->usage))
__put_cred(cred);
 }
 
diff --git a/kernel/cred.c b/kernel/cred.c
index 9604c1a..86c039a 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -42,7 +42,7 @@ struct group_info init_groups = { .usage = REFCOUNT_INIT(2) };
  * The initial credentials for the initial task
  */
 struct cred init_cred = {
-   .usage  = ATOMIC_INIT(4),
+   .usage  = REFCOUNT_INIT(4),
 #ifdef CONFIG_DEBUG_CREDENTIALS
.subscribers= ATOMIC_INIT(2),
.magic  = CRED_MAGIC,
@@ -101,17 +101,17 @@ static void put_cred_rcu(struct rcu_head *rcu)
 
 #ifdef CONFIG_DEBUG_CREDENTIALS
if (cred->magic != CRED_MAGIC_DEAD ||
-   atomic_read(&cred->usage) != 0 ||
+   refcount_read(&cred->usage) != 0 ||
read_cred_subscribers(cred) != 0)
panic("CRED: put_cred_rcu() sees %p with"
  " mag %x, put %p, usage %d, subscr %d\n",
  cred, cred->magic, cred->put_addr,
- atomic_read(&cred->usage),
+ refcount_read(&cred->usage),
  read_cred_subscribers(cred));
 #else
-   if (atomic_read(&cred->usage) != 0)
+   if (refcount_read(&cred->usage) != 0)
panic("CRED: put_cred_rcu() sees %p with usage %d\n",
- cred, atomic_read(&cred->usage));
+ cred, refcount_read(&cred->usage));
 #endif
 
security_cred_free(cred);
@@ -135,10 +135,10 @@ static void put_cred_rcu(struct rcu_head *rcu)
 void __put_cred(struct cred *cred)
 {
kdebug("__put_cred(%p{%d,%d})", cred,
-  atomic_read(&cred->usage),
+  refcount_read(&cred->usage),
   read_cred_subscribers(cred));
 
-   BUG_ON(atomic_read(&cred->usage) != 0);
+   BUG_ON(refcount_read(&cred->usage) != 0);
 #ifdef
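
Worth noting in the cred hunks: every remaining refcount_read() is diagnostic, feeding the kdebug() trace and the sanity panics in put_cred_rcu(). That is the safe role for a raw read, because the sampled value can be stale a moment later; the lifetime decisions themselves stay with refcount_dec_and_test() and friends. A tiny sketch of the distinction on a hypothetical object:

struct cobj {
	refcount_t ref;
	/* ... */
};

static void cobj_put(struct cobj *o)
{
	/* fine: a snapshot for a debug message, not for a decision */
	pr_debug("cobj_put(%p) ref=%u\n", o, refcount_read(&o->ref));

	/* the lifetime decision is the atomic test-and-decrement */
	if (refcount_dec_and_test(&o->ref))
		kfree(o);

	/* racy, do not do this: if (refcount_read(&o->ref) == 1) kfree(o); */
}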

[PATCH 08/16] perf: convert perf_event_context.refcount to refcount_t

2017-11-15 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable perf_event_context.refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be in a state to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't have some undocumented
memory guarantees for this variable usage.

For the perf_event_context.refcount it might make a difference
in following places:
 - get_ctx(), perf_event_ctx_lock_nested(), perf_lock_task_context()
   and __perf_event_ctx_lock_double(): increment in
   refcount_inc_not_zero() only guarantees control dependency
   on success vs. fully ordered atomic counterpart
 - put_ctx(): decrement in refcount_dec_and_test() only
   provides RELEASE ordering and control dependency on success
   vs. fully ordered atomic counterpart

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/perf_event.h |  3 ++-
 kernel/events/core.c   | 12 ++--
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2c9c87d..6a78705 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -54,6 +54,7 @@ struct perf_guest_info_callbacks {
 #include 
 #include 
 #include 
+#include 
 #include 
 
 struct perf_callchain_entry {
@@ -718,7 +719,7 @@ struct perf_event_context {
int nr_stat;
int nr_freq;
int rotate_disable;
-   atomic_trefcount;
+   refcount_t  refcount;
struct task_struct  *task;
 
/*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d084a97..29c381f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1148,7 +1148,7 @@ static void perf_event_ctx_deactivate(struct 
perf_event_context *ctx)
 
 static void get_ctx(struct perf_event_context *ctx)
 {
-   WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
+   WARN_ON(!refcount_inc_not_zero(&ctx->refcount));
 }
 
 static void free_ctx(struct rcu_head *head)
@@ -1162,7 +1162,7 @@ static void free_ctx(struct rcu_head *head)
 
 static void put_ctx(struct perf_event_context *ctx)
 {
-   if (atomic_dec_and_test(&ctx->refcount)) {
+   if (refcount_dec_and_test(&ctx->refcount)) {
if (ctx->parent_ctx)
put_ctx(ctx->parent_ctx);
if (ctx->task && ctx->task != TASK_TOMBSTONE)
@@ -1240,7 +1240,7 @@ perf_event_ctx_lock_nested(struct perf_event *event, int 
nesting)
 again:
rcu_read_lock();
ctx = READ_ONCE(event->ctx);
-   if (!atomic_inc_not_zero(&ctx->refcount)) {
+   if (!refcount_inc_not_zero(&ctx->refcount)) {
rcu_read_unlock();
goto again;
}
@@ -1373,7 +1373,7 @@ perf_lock_task_context(struct task_struct *task, int 
ctxn, unsigned long *flags)
}
 
if (ctx->task == TASK_TOMBSTONE ||
-   !atomic_inc_not_zero(&ctx->refcount)) {
+   !refcount_inc_not_zero(&ctx->refcount)) {
raw_spin_unlock(&ctx->lock);
ctx = NULL;
} else {
@@ -3715,7 +3715,7 @@ static void __perf_event_init_context(struct 
perf_event_context *ctx)
INIT_LIST_HEAD(&ctx->pinned_groups);
INIT_LIST_HEAD(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
-   atomic_set(&ctx->refcount, 1);
+   refcount_set(&ctx->refcount, 1);
 }
 
 static struct perf_event_context *
@@ -9793,7 +9793,7 @@ __perf_event_ctx_lock_double(struct perf_event 
*group_leader,
 again:
rcu_read_lock();
gctx = READ_ONCE(group_leader->ctx);
-   if (!atomic_inc_not_zero
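
perf_event_ctx_lock_nested() also shows what to do when refcount_inc_not_zero() fails: the context it found is already on its way out, so the code retries the RCU lookup and picks up whatever has replaced it, rather than returning NULL as a plain try-get would. A sketch of that retry shape on a hypothetical object reached through an RCU pointer that is replaced when the old object dies:

struct pctx {
	refcount_t ref;
	/* ... */
};

static struct pctx *pctx_pin(struct pctx __rcu **slot)
{
	struct pctx *p;

again:
	rcu_read_lock();
	p = rcu_dereference(*slot);
	if (p && !refcount_inc_not_zero(&p->ref)) {
		/* lost the race with the final put; the slot will be updated */
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	return p;	/* NULL, or pinned until the caller drops it */
}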

[PATCH 07/16] sched/task_struct: convert task_struct.stack_refcount to refcount_t

2017-11-15 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to a use-after-free situation and be exploitable.

The variable task_struct.stack_refcount is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be in a state to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't have some undocumented
memory guarantees for this variable usage.

For the task_struct.stack_refcount it might make a difference
in following places:
 - try_get_task_stack(): increment in refcount_inc_not_zero() only
   guarantees control dependency on success vs. fully ordered
   atomic counterpart
 - put_task_stack(): decrement in refcount_dec_and_test() only
   provides RELEASE ordering and control dependency on success
   vs. fully ordered atomic counterpart

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/init_task.h| 3 ++-
 include/linux/sched.h| 2 +-
 include/linux/sched/task_stack.h | 2 +-
 kernel/fork.c| 6 +++---
 4 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 1e35fce..6a87579 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -13,6 +13,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -207,7 +208,7 @@ extern struct cred init_cred;
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 # define INIT_TASK_TI(tsk) \
.thread_info = INIT_THREAD_INFO(tsk),   \
-   .stack_refcount = ATOMIC_INIT(1),
+   .stack_refcount = REFCOUNT_INIT(1),
 #else
 # define INIT_TASK_TI(tsk)
 #endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 924a812..c8c6d17 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1098,7 +1098,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_THREAD_INFO_IN_TASK
/* A live task holds one reference: */
-   atomic_tstack_refcount;
+   refcount_t  stack_refcount;
 #endif
 #ifdef CONFIG_LIVEPATCH
int patch_state;
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index cb4828a..4559316 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -61,7 +61,7 @@ static inline unsigned long *end_of_stack(struct task_struct 
*p)
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 static inline void *try_get_task_stack(struct task_struct *tsk)
 {
-   return atomic_inc_not_zero(&tsk->stack_refcount) ?
+   return refcount_inc_not_zero(&tsk->stack_refcount) ?
task_stack_page(tsk) : NULL;
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 16df4f5..822efa2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -362,7 +362,7 @@ static void release_task_stack(struct task_struct *tsk)
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 void put_task_stack(struct task_struct *tsk)
 {
-   if (atomic_dec_and_test(&tsk->stack_refcount))
+   if (refcount_dec_and_test(&tsk->stack_refcount))
release_task_stack(tsk);
 }
 #endif
@@ -380,7 +380,7 @@ void free_task(struct task_struct *tsk)
 * If the task had a separate stack allocation, it should be gone
 * by now.
 */
-   WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
+   WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
 #endif
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
@@ -795,7 +795,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
tsk->stack_vm_area = stack_vm_area;
 #endif
 #ifdef CONFIG_THREAD_INFO_IN_TASK
-   atomic_set(&tsk->stack_refcount, 1);
+   refcount_set(&tsk->stack_refcount, 1);
 #endif
 
if (err)
-- 
2.7.4



[PATCH 06/16] sched/task_struct: convert task_struct.usage to refcount_t

2017-11-15 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable task_struct.usage is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be ready to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't rely on any undocumented
memory-ordering guarantees for this variable.

For the task_struct.usage it might make a difference
in following places:
 - put_task_struct(): decrement in refcount_dec_and_test() only
   provides RELEASE ordering and control dependency on success
   vs. fully ordered atomic counterpart
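
To give a feel for why RELEASE ordering is normally all a put path
needs, here is a sketch; drop_task_ref() is made up for illustration
and is not part of the patch:

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

static void drop_task_ref(struct task_struct *tsk)
{
    /* a last access to the object before dropping our reference */
    pr_debug("dropping reference to task %d\n", task_pid_nr(tsk));

    /*
     * The RELEASE in refcount_dec_and_test() orders the access above
     * before the decrement, so __put_task_struct() cannot free the
     * task while this CPU's accesses to it are still in flight.
     */
    put_task_struct(tsk);               /* may free tsk on the last reference */
}

Only a caller that relied on the old fully ordered atomic_dec_and_test()
for ordering of unrelated memory accesses would need a closer look.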

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/init_task.h  | 2 +-
 include/linux/sched.h  | 3 ++-
 include/linux/sched/task.h | 4 ++--
 kernel/fork.c  | 4 ++--
 4 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9eb2ce8..1e35fce 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -227,7 +227,7 @@ extern struct cred init_cred;
INIT_TASK_TI(tsk)   \
.state  = 0,\
.stack  = init_stack,   \
-   .usage  = ATOMIC_INIT(2),   \
+   .usage  = REFCOUNT_INIT(2), \
.flags  = PF_KTHREAD,   \
.prio   = MAX_PRIO-20,  \
.static_prio= MAX_PRIO-20,  \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 44f9df5..924a812 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -21,6 +21,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -536,7 +537,7 @@ struct task_struct {
randomized_struct_fields_start
 
void*stack;
-   atomic_tusage;
+   refcount_t  usage;
/* Per task flags (PF_*), defined further below: */
unsigned intflags;
unsigned intptrace;
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 5be31eb..dae8d04 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -86,13 +86,13 @@ extern void sched_exec(void);
 #define sched_exec()   {}
 #endif
 
-#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+#define get_task_struct(tsk) do { refcount_inc(&(tsk)->usage); } while(0)
 
 extern void __put_task_struct(struct task_struct *t);
 
 static inline void put_task_struct(struct task_struct *t)
 {
-   if (atomic_dec_and_test(&t->usage))
+   if (refcount_dec_and_test(&t->usage))
__put_task_struct(t);
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index a65ec7d..16df4f5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -649,7 +649,7 @@ static inline void put_signal_struct(struct signal_struct *sig)
 void __put_task_struct(struct task_struct *tsk)
 {
WARN_ON(!tsk->exit_state);
-   WARN_ON(atomic_read(&tsk->usage));
+   WARN_ON(refcount_read(&tsk->usage));
WARN_ON(tsk == current);
 
cgroup_free(tsk);
@@ -824,7 +824,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 * One for us, one for whoever does the "release_task()" (usually
 * parent)
 */
-   atomic_set(&tsk->usage, 2);
+   refcount_set(&tsk->usage, 2);
 #ifdef CONFIG_BLK_DEV_IO_TRACE
tsk->btrace_seq = 0;
 #endif
-- 
2.7.4



[PATCH 11/16] uprobes: convert uprobe.ref to refcount_t

2017-11-16 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable uprobe.ref is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be ready to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't rely on any undocumented
memory-ordering guarantees for this variable.

For the uprobe.ref it might make a difference
in following places:
 - put_uprobe(): decrement in refcount_dec_and_test() only
   provides RELEASE ordering and control dependency on success
   vs. fully ordered atomic counterpart

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 kernel/events/uprobes.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 8d42d8f..3514b42 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -66,7 +66,7 @@ static struct percpu_rw_semaphore dup_mmap_sem;
 
 struct uprobe {
struct rb_node  rb_node;/* node in the rb tree */
-   atomic_tref;
+   refcount_t  ref;
struct rw_semaphore register_rwsem;
struct rw_semaphore consumer_rwsem;
struct list_headpending_list;
@@ -371,13 +371,13 @@ set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long v
 
 static struct uprobe *get_uprobe(struct uprobe *uprobe)
 {
-   atomic_inc(&uprobe->ref);
+   refcount_inc(&uprobe->ref);
return uprobe;
 }
 
 static void put_uprobe(struct uprobe *uprobe)
 {
-   if (atomic_dec_and_test(&uprobe->ref))
+   if (refcount_dec_and_test(&uprobe->ref))
kfree(uprobe);
 }
 
@@ -459,7 +459,7 @@ static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
rb_link_node(&uprobe->rb_node, parent, p);
rb_insert_color(&uprobe->rb_node, &uprobes_tree);
/* get access + creation ref */
-   atomic_set(&uprobe->ref, 2);
+   refcount_set(&uprobe->ref, 2);
 
return u;
 }
-- 
2.7.4



[PATCH 00/16] v6 kernel core pieces refcount conversions

2017-11-16 Thread Elena Reshetova
Changes in v6:
 * memory ordering differences are outlined in each patch
   together with potential problematic areas.
  Note: I didn't include statements in the individual patches
  on why I think the memory ordering changes do not matter
  in each particular case, since ultimately only the maintainers
  know this (unless it is explicitly documented) and it is
  very hard to figure out reliably from the code.
  Therefore maintainers are expected to double check the
  specific functions pointed out and make the final decision.
 * rebase on top of today's linux-next/master  
 

Changes in v5:
 * Kees caught that the following changes in
   perf_event_context.refcount and futex_pi_state.refcount
   are not correct now when ARCH_HAS_REFCOUNT is enabled:
-   WARN_ON(!atomic_inc_not_zero(refcount));
+   refcount_inc(refcount);
   So they are now changed back to using refcount_inc_not_zero. 
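
   In other words, a call site that previously checked the increment
   result keeps that check after the conversion; schematically
   (illustration only):

/* keep the "was the count still non-zero?" check at the call site */
/* was: WARN_ON(!atomic_inc_not_zero(refcount)); */
WARN_ON(!refcount_inc_not_zero(refcount));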

Changes in v4:
 * just rebase and corrections on linux-next/master

Changes in v3:
 * SoB chain corrected
 * minor corrections based on v2 feedback
 * rebase on linux-next/master as of today

Changes in v2:
 * dropped already merged patches
 * rebase on top of linux-next/master
 * Now by default refcount_t = atomic_t (*) and uses the standard
   atomic operations unless CONFIG_REFCOUNT_FULL is enabled.
   This is a compromise for performance-critical systems (such as
   networking) that cannot accept even a slight slowdown of the
   refcounter operations.
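
   Schematically, the non-FULL default just forwards to the plain
   atomic primitives. This is a simplified sketch of the idea behind
   include/linux/refcount.h, not an exact quote of the header:

#ifdef CONFIG_REFCOUNT_FULL
/* checked, saturating implementations live in lib/refcount.c */
extern void refcount_inc(refcount_t *r);
extern __must_check bool refcount_dec_and_test(refcount_t *r);
#else
static inline void refcount_inc(refcount_t *r)
{
    atomic_inc(&r->refs);               /* plain atomic, no overflow checks */
}

static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
    return atomic_dec_and_test(&r->refs);
}
#endif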

This series, for core kernel components, replaces atomic_t reference
counters with the new refcount_t type and API (see include/linux/refcount.h).
By doing this we prevent intentional or accidental
underflows or overflows that can lead to use-after-free vulnerabilities.

The patches are fully independent and can be cherry-picked separately.
If there are no objections to the patches, please merge them via the respective trees.


Elena Reshetova (16):
  futex: convert futex_pi_state.refcount to refcount_t
  sched: convert sighand_struct.count to refcount_t
  sched: convert signal_struct.sigcnt to refcount_t
  sched: convert user_struct.__count to refcount_t
  sched: convert numa_group.refcount to refcount_t
  sched/task_struct: convert task_struct.usage to refcount_t
  sched/task_struct: convert task_struct.stack_refcount to refcount_t
  perf: convert perf_event_context.refcount to refcount_t
  perf/ring_buffer: convert ring_buffer.refcount to refcount_t
  perf/ring_buffer: convert ring_buffer.aux_refcount to refcount_t
  uprobes: convert uprobe.ref to refcount_t
  nsproxy: convert nsproxy.count to refcount_t
  groups: convert group_info.usage to refcount_t
  creds: convert cred.usage to refcount_t
  kcov: convert kcov.refcount to refcount_t
  bdi: convert bdi_writeback_congested.refcnt from atomic_t to
refcount_t

 fs/exec.c|  4 ++--
 fs/proc/task_nommu.c |  2 +-
 include/linux/backing-dev-defs.h |  3 ++-
 include/linux/backing-dev.h  |  4 ++--
 include/linux/cred.h | 13 ++--
 include/linux/init_task.h|  7 +++---
 include/linux/nsproxy.h  |  6 +++---
 include/linux/perf_event.h   |  3 ++-
 include/linux/sched.h|  5 +++--
 include/linux/sched/signal.h |  5 +++--
 include/linux/sched/task.h   |  4 ++--
 include/linux/sched/task_stack.h |  2 +-
 include/linux/sched/user.h   |  5 +++--
 kernel/cred.c| 46 
 kernel/events/core.c | 18 
 kernel/events/internal.h |  5 +++--
 kernel/events/ring_buffer.c  |  8 +++
 kernel/events/uprobes.c  |  8 +++
 kernel/fork.c| 24 ++---
 kernel/futex.c   | 15 +++--
 kernel/groups.c  |  2 +-
 kernel/kcov.c|  9 
 kernel/nsproxy.c |  6 +++---
 kernel/sched/fair.c  | 12 +--
 kernel/user.c|  8 +++
 mm/backing-dev.c | 14 ++--
 26 files changed, 125 insertions(+), 113 deletions(-)

-- 
2.7.4



[PATCH 12/16] nsproxy: convert nsproxy.count to refcount_t

2017-11-16 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable nsproxy.count is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts.
The full comparison can be seen in
https://lkml.org/lkml/2017/11/15/57 and it will hopefully soon
be ready to be merged into the documentation tree.
Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't rely on any undocumented
memory-ordering guarantees for this variable.

For the nsproxy.count it might make a difference
in following places:
 - put_nsproxy() and switch_task_namespaces(): decrement in
   refcount_dec_and_test() only provides RELEASE ordering
   and control dependency on success vs. fully ordered
   atomic counterpart

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 include/linux/nsproxy.h | 6 +++---
 kernel/nsproxy.c| 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 2ae1b1a..90f09ff 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -29,7 +29,7 @@ struct fs_struct;
  * nsproxy is copied.
  */
 struct nsproxy {
-   atomic_t count;
+   refcount_t count;
struct uts_namespace *uts_ns;
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
@@ -75,14 +75,14 @@ int __init nsproxy_cache_init(void);
 
 static inline void put_nsproxy(struct nsproxy *ns)
 {
-   if (atomic_dec_and_test(&ns->count)) {
+   if (refcount_dec_and_test(&ns->count)) {
free_nsproxy(ns);
}
 }
 
 static inline void get_nsproxy(struct nsproxy *ns)
 {
-   atomic_inc(&ns->count);
+   refcount_inc(&ns->count);
 }
 
 #endif
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index f6c5d33..5bfe691 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -31,7 +31,7 @@
 static struct kmem_cache *nsproxy_cachep;
 
 struct nsproxy init_nsproxy = {
-   .count  = ATOMIC_INIT(1),
+   .count  = REFCOUNT_INIT(1),
.uts_ns = &init_uts_ns,
 #if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
.ipc_ns = &init_ipc_ns,
@@ -52,7 +52,7 @@ static inline struct nsproxy *create_nsproxy(void)
 
nsproxy = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL);
if (nsproxy)
-   atomic_set(&nsproxy->count, 1);
+   refcount_set(&nsproxy->count, 1);
return nsproxy;
 }
 
@@ -225,7 +225,7 @@ void switch_task_namespaces(struct task_struct *p, struct nsproxy *new)
p->nsproxy = new;
task_unlock(p);
 
-   if (ns && atomic_dec_and_test(&ns->count))
+   if (ns && refcount_dec_and_test(&ns->count))
free_nsproxy(ns);
 }
 
-- 
2.7.4



[PATCH] refcount_t: add ACQUIRE ordering on success for dec(sub)_and_test variants

2019-01-28 Thread Elena Reshetova
This adds an smp_acquire__after_ctrl_dep() barrier on successful
decrease of refcounter value from 1 to 0 for refcount_dec(sub)_and_test
variants and therefore gives stronger memory ordering guarantees than
prior versions of these functions.
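
The motivation, roughly: the RELEASE in the next-to-last put() on one
CPU needs to pair with an ACQUIRE on the CPU that drops the last
reference, so that the teardown code is guaranteed to observe
everything written to the object before the earlier put. A sketch
(struct obj, obj_put() and the stats field are made up for
illustration, they are not part of the patch):

#include <linux/printk.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
    refcount_t ref;
    int stats;
};

static void obj_put(struct obj *o)
{
    /*
     * CPU0: o->stats = 1; obj_put(o);   -- RELEASE, drops 2 -> 1
     * CPU1: obj_put(o);                 -- drops 1 -> 0 and frees
     *
     * The smp_acquire__after_ctrl_dep() on the successful 1 -> 0
     * transition pairs with CPU0's RELEASE, so the code below is
     * guaranteed to observe o->stats == 1 before the memory is freed.
     */
    if (refcount_dec_and_test(&o->ref)) {
        pr_info("final stats: %d\n", o->stats);
        kfree(o);
    }
}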

Co-Developed-by: Peter Zijlstra (Intel) 
Signed-off-by: Elena Reshetova 
---
 Documentation/core-api/refcount-vs-atomic.rst | 28 +++
 arch/x86/include/asm/refcount.h   | 21 
 lib/refcount.c| 16 ++-
 3 files changed, 52 insertions(+), 13 deletions(-)

diff --git a/Documentation/core-api/refcount-vs-atomic.rst b/Documentation/core-api/refcount-vs-atomic.rst
index 322851b..95d4b4e 100644
--- a/Documentation/core-api/refcount-vs-atomic.rst
+++ b/Documentation/core-api/refcount-vs-atomic.rst
@@ -54,6 +54,14 @@ must propagate to all other CPUs before the release operation
 (A-cumulative property). This is implemented using
 :c:func:`smp_store_release`.
 
+An ACQUIRE memory ordering guarantees that all post loads and
+stores (all po-later instructions) on the same CPU are
+completed after the acquire operation. It also guarantees that all
+po-later stores on the same CPU must propagate to all other CPUs
+after the acquire operation executes. This is implemented using
+:c:func:`smp_acquire__after_ctrl_dep`.
+
 A control dependency (on success) for refcounters guarantees that
 if a reference for an object was successfully obtained (reference
 counter increment or addition happened, function returned true),
@@ -119,24 +127,36 @@ Memory ordering guarantees changes:
result of obtaining pointer to the object!
 
 
-case 5) - decrement-based RMW ops that return a value
--
+case 5) - generic dec/sub decrement-based RMW ops that return a value
+-
 
 Function changes:
 
  * :c:func:`atomic_dec_and_test` --> :c:func:`refcount_dec_and_test`
  * :c:func:`atomic_sub_and_test` --> :c:func:`refcount_sub_and_test`
+
+Memory ordering guarantees changes:
+
+ * fully ordered --> RELEASE ordering + ACQUIRE ordering and control dependency
+   on success.  
+
+
+case 6) other decrement-based RMW ops that return a value
+-
+
+Function changes:
+
  * no atomic counterpart --> :c:func:`refcount_dec_if_one`
  * ``atomic_add_unless(&var, -1, 1)`` --> ``refcount_dec_not_one(&var)``
 
 Memory ordering guarantees changes:
 
- * fully ordered --> RELEASE ordering + control dependency
+ * fully ordered --> RELEASE ordering + control dependency 
 
 .. note:: :c:func:`atomic_add_unless` only provides full order on success.
 
 
-case 6) - lock-based RMW
+case 7) - lock-based RMW
 
 
 Function changes:
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
index dbaed55..ab8f584 100644
--- a/arch/x86/include/asm/refcount.h
+++ b/arch/x86/include/asm/refcount.h
@@ -67,16 +67,29 @@ static __always_inline void refcount_dec(refcount_t *r)
 static __always_inline __must_check
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
-   return GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
+   bool ret = GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
 REFCOUNT_CHECK_LT_ZERO,
 r->refs.counter, e, "er", i, "cx");
+
+   if (ret) {
+           smp_acquire__after_ctrl_dep();
+           return true;
+   }
+
+   return false;
 }
 
 static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
 {
-   return GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
-   REFCOUNT_CHECK_LT_ZERO,
-   r->refs.counter, e, "cx");
+   bool ret = GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
+   REFCOUNT_CHECK_LT_ZERO,
+   r->refs.counter, e, "cx");
+   if (ret) {
+           smp_acquire__after_ctrl_dep();
+           return true;
+   }
+
+   return false;
 }
 
 static __always_inline __must_check
diff --git a/lib/refcount.c b/lib/refcount.c
index ebcf8cd..732feac 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -33,6 +33,9 @@
  * Note that the allocator is responsible for ordering things between free()
  * and alloc().
  *
+ * The decrements dec_and_test() and sub_and_test() also provide acquire
+ * ordering on success. 
+ *
  */
 
 #include 
@@ -164,8 +167,7 @@ EXPORT_SYMBOL(refcount_inc_checked);
  * at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
+ * befo

[PATCH 2/3] perf/ring_buffer: convert ring_buffer.refcount to refcount_t

2019-01-28 Thread Elena Reshetova
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable ring_buffer.refcount is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

**Important note for maintainers:

Some functions from refcount_t API defined in lib/refcount.c
have different memory ordering guarantees than their atomic
counterparts. Please check Documentation/core-api/refcount-vs-atomic.rst
for more information.

Normally the differences should not matter since refcount_t provides
enough guarantees to satisfy the refcounting use cases, but in
some rare cases it might matter.
Please double check that you don't rely on any undocumented
memory-ordering guarantees for this variable.

For the ring_buffer.refcount it might make a difference
in following places:
 - ring_buffer_get(): increment in refcount_inc_not_zero() only
   guarantees control dependency on success vs. fully ordered
   atomic counterpart
 - ring_buffer_put(): decrement in refcount_dec_and_test() only
   provides RELEASE ordering and ACQUIRE ordering + control dependency
   on success vs. fully ordered atomic counterpart

Suggested-by: Kees Cook 
Reviewed-by: David Windsor 
Reviewed-by: Hans Liljestrand 
Signed-off-by: Elena Reshetova 
---
 kernel/events/core.c| 4 ++--
 kernel/events/internal.h| 3 ++-
 kernel/events/ring_buffer.c | 2 +-
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index a1e87d2..963cee0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5388,7 +5388,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event *event)
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
-   if (!atomic_inc_not_zero(&rb->refcount))
+   if (!refcount_inc_not_zero(&rb->refcount))
rb = NULL;
}
rcu_read_unlock();
@@ -5398,7 +5398,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event *event)
 
 void ring_buffer_put(struct ring_buffer *rb)
 {
-   if (!atomic_dec_and_test(&rb->refcount))
+   if (!refcount_dec_and_test(&rb->refcount))
return;
 
WARN_ON_ONCE(!list_empty(&rb->event_list));
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 6dc725a..4718de2 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -4,13 +4,14 @@
 
 #include 
 #include 
+#include 
 
 /* Buffer handling */
 
 #define RING_BUFFER_WRITABLE   0x01
 
 struct ring_buffer {
-   atomic_trefcount;
+   refcount_t  refcount;
struct rcu_head rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
struct work_struct  work;
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 4a99370..e841d48 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -285,7 +285,7 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
else
rb->overwrite = 1;
 
-   atomic_set(&rb->refcount, 1);
+   refcount_set(&rb->refcount, 1);
 
INIT_LIST_HEAD(&rb->event_list);
spin_lock_init(&rb->event_lock);
-- 
2.7.4


