diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 624ef3f..9e36680 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -107,7 +107,8 @@
  * used in the general case to determine whether sleeping is possible.
  * Do not use in_atomic() in driver code.
  */
-#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
+#define preempt_offset_in_atomic(offset)	((offset & ~PREEMPT_ACTIVE) != 0)
+#define in_atomic()	preempt_offset_in_atomic(preempt_count())
 
 /*
  * Check whether we were atomic before we did preempt_disable():
@@ -118,10 +119,8 @@
 
 #ifdef CONFIG_PREEMPT_COUNT
 # define preemptible()	(preempt_count() == 0 && !irqs_disabled())
-# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
 # define preemptible()	0
-# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
 
 #if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ed567ba..b14e40e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -320,20 +320,45 @@ void irq_enter(void)
 	__irq_enter();
 }
 
+/*
+ * Invoke softirqs from irq_exit().
+ *
+ * If softirq processing is possible and work is pending, this
+ * temporarily converts the preempt count from hardirq to softirq
+ * accounting, runs the softirqs, and then restores the hardirq
+ * accounting before returning (the count is unchanged on return).
+ */
 static inline void invoke_softirq(void)
 {
-	if (!force_irqthreads) {
+	/* Can we run softirqs at all? We might be nesting interrupts */
+	if (preempt_offset_in_interrupt(preempt_count() - HARDIRQ_OFFSET))
+		return;
+
+	/* Are there any pending? */
+	if (!local_softirq_pending())
+		return;
+
+	/* Do we force irq threads? If so, just wake up the daemon */
+	if (force_irqthreads) {
+		wakeup_softirqd();
+		return;
+	}
+
+	/*
+	 * Ok, do actual softirq handling!
+	 *
+	 * This downgrades us from hardirq context to softirq context.
+	 */
+	preempt_count() += SOFTIRQ_OFFSET - HARDIRQ_OFFSET;
+
+	trace_softirqs_off(__builtin_return_address(0));
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
-		__do_softirq();
+	__do_softirq();
 #else
-		do_softirq();
+	do_softirq();
 #endif
-	} else {
-		__local_bh_disable((unsigned long)__builtin_return_address(0),
-				SOFTIRQ_OFFSET);
-		wakeup_softirqd();
-		__local_bh_enable(SOFTIRQ_OFFSET);
-	}
+	preempt_count() += HARDIRQ_OFFSET - SOFTIRQ_OFFSET;
+	trace_softirqs_on((unsigned long)__builtin_return_address(0));
 }
 
 /*
@@ -343,17 +368,19 @@ void irq_exit(void)
 {
 	vtime_account_irq_exit(current);
 	trace_hardirq_exit();
-	sub_preempt_count(IRQ_EXIT_OFFSET);
-	if (!in_interrupt() && local_softirq_pending())
-		invoke_softirq();
+	invoke_softirq();
 
 #ifdef CONFIG_NO_HZ
 	/* Make sure that timer wheel updates are propagated */
-	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
-		tick_nohz_irq_exit();
+	if (idle_cpu(smp_processor_id())) {
+		int offset = preempt_count() - HARDIRQ_OFFSET;
+
+		if (!preempt_offset_in_interrupt(offset) && !need_resched())
+			tick_nohz_irq_exit();
+	}
 #endif
 	rcu_irq_exit();
-	sched_preempt_enable_no_resched();
+	sub_preempt_count(HARDIRQ_OFFSET);
 }
 
 /*
