Patches contributed by Eötvös Loránd University


commit c5905afb0ee6550b42c49213da1c22d67316c194
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Feb 24 08:31:31 2012 +0100

    static keys: Introduce 'struct static_key', static_key_true()/false() and static_key_slow_[inc|dec]()
    
    So here's a boot-tested patch on top of Jason's series that
    does all the cleanups I talked about and turns jump labels
    into a more intuitive facility to use. It should also address
    the various misconceptions and confusions that surround jump
    labels.
    
    Typical usage scenarios:
    
            #include <linux/static_key.h>
    
            struct static_key key = STATIC_KEY_INIT_TRUE;
    
            if (static_key_false(&key))
                    do unlikely code
            else
                    do likely code
    
    Or:
    
            if (static_key_true(&key))
                    do likely code
            else
                    do unlikely code
    
    The static key is modified via:
    
            static_key_slow_inc(&key);
            ...
            static_key_slow_dec(&key);
    
    The 'slow' prefix makes it abundantly clear that this is an
    expensive operation.
    
    I've updated all in-kernel code to use this everywhere. Note
    that I (intentionally) have not pushed the rename blindly
    through to the lowest levels: the actual jump-label patching
    arch facility should keep the jump-label name, so we want to
    decouple jump labels from the static-key facility a bit.
    
    On architectures without jump-label support, static keys fall
    back to plain likely()/unlikely() branches.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Acked-by: Jason Baron <jbaron@redhat.com>
    Acked-by: Steven Rostedt <rostedt@goodmis.org>
    Cc: a.p.zijlstra@chello.nl
    Cc: mathieu.desnoyers@efficios.com
    Cc: davem@davemloft.net
    Cc: ddaney.cavm@gmail.com
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Link: http://lkml.kernel.org/r/20120222085809.GA26397@elte.hu
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
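
For reference, a minimal consolidated sketch of the new API as
described above; the my_feature_*() and do_*_work() names are
hypothetical placeholders:

        #include <linux/static_key.h>

        extern void do_rare_work(void);
        extern void do_common_work(void);

        /* Starts out false: the unlikely block compiles to a nop. */
        static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

        /* Slow path: each disable must pair with an earlier enable. */
        void my_feature_set(bool on)
        {
                if (on)
                        static_key_slow_inc(&my_feature_key);
                else
                        static_key_slow_dec(&my_feature_key);
        }

        /* Hot path: a nop while the key is false, a jump once it is true. */
        void my_hot_path(void)
        {
                if (static_key_false(&my_feature_key))
                        do_rare_work();
                else
                        do_common_work();
        }

Because static_key_slow_inc()/_dec() reference-count the key,
independent users can enable the feature concurrently without
stepping on each other.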

diff --git a/arch/Kconfig b/arch/Kconfig
index 4f55c736be11..5b448a74d0f7 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -47,18 +47,29 @@ config KPROBES
 	  If in doubt, say "N".
 
 config JUMP_LABEL
-       bool "Optimize trace point call sites"
+       bool "Optimize very unlikely/likely branches"
        depends on HAVE_ARCH_JUMP_LABEL
        help
+         This option enables a transparent branch optimization that
+	 makes certain almost-always-true or almost-always-false branch
+	 conditions even cheaper to execute within the kernel.
+
+	 Certain performance-sensitive kernel code, such as trace points,
+	 scheduler functionality, networking code and KVM have such
+	 branches and include support for this optimization technique.
+
          If it is detected that the compiler has support for "asm goto",
-	 the kernel will compile trace point locations with just a
-	 nop instruction. When trace points are enabled, the nop will
-	 be converted to a jump to the trace function. This technique
-	 lowers overhead and stress on the branch prediction of the
-	 processor.
-
-	 On i386, options added to the compiler flags may increase
-	 the size of the kernel slightly.
+	 the kernel will compile such branches with just a nop
+	 instruction. When the condition flag is toggled to true, the
+	 nop will be converted to a jump instruction to execute the
+	 conditional block of instructions.
+
+	 This technique lowers overhead and stress on the branch prediction
+	 of the processor and generally makes the kernel faster. The update
+	 of the condition is slower, but such updates are always very rare.
+
+	 ( On 32-bit x86, the necessary options added to the compiler
+	   flags may increase the size of the kernel slightly. )
 
 config OPTPROBES
 	def_bool y
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 32551d304cd7..b149b88ea795 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -281,9 +281,9 @@ paravirt_init_missing_ticks_accounting(int cpu)
 		pv_time_ops.init_missing_ticks_accounting(cpu);
 }
 
-struct jump_label_key;
-extern struct jump_label_key paravirt_steal_enabled;
-extern struct jump_label_key paravirt_steal_rq_enabled;
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
 
 static inline int
 paravirt_do_steal_accounting(unsigned long *new_itm)
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
index 100868216c55..1b22f6de2932 100644
--- a/arch/ia64/kernel/paravirt.c
+++ b/arch/ia64/kernel/paravirt.c
@@ -634,8 +634,8 @@ struct pv_irq_ops pv_irq_ops = {
  * pv_time_ops
  * time operations
  */
-struct jump_label_key paravirt_steal_enabled;
-struct jump_label_key paravirt_steal_rq_enabled;
+struct static_key paravirt_steal_enabled;
+struct static_key paravirt_steal_rq_enabled;
 
 static int
 ia64_native_do_steal_accounting(unsigned long *new_itm)
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 1881b316ca45..4d6d77ed9b9d 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -20,7 +20,7 @@
 #define WORD_INSN ".word"
 #endif
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
 	asm goto("1:\tnop\n\t"
 		"nop\n\t"
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 938986e412f1..ae098c438f00 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -17,7 +17,7 @@
 #define JUMP_ENTRY_TYPE		stringify_in_c(FTR_ENTRY_LONG)
 #define JUMP_LABEL_NOP_SIZE	4
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
 	asm goto("1:\n\t"
 		 "nop\n\t"
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 95a6cf2b5b67..6c32190dc73e 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -13,7 +13,7 @@
 #define ASM_ALIGN ".balign 4"
 #endif
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
 	asm goto("0:	brcl 0,0\n"
 		".pushsection __jump_table, \"aw\"\n"
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index fc73a82366f8..5080d16a832f 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -7,7 +7,7 @@
 
 #define JUMP_LABEL_NOP_SIZE 4
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
 		asm goto("1:\n\t"
 			 "nop\n\t"
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index a32b18ce6ead..3a16c1483b45 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -9,12 +9,12 @@
 
 #define JUMP_LABEL_NOP_SIZE 5
 
-#define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
+#define STATIC_KEY_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
 	asm goto("1:"
-		JUMP_LABEL_INITIAL_NOP
+		STATIC_KEY_INITIAL_NOP
 		".pushsection __jump_table,  \"aw\" \n\t"
 		_ASM_ALIGN "\n\t"
 		_ASM_PTR "1b, %l[l_yes], %c0 \n\t"
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index a7d2db9a74fb..c0180fd372d2 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -230,9 +230,9 @@ static inline unsigned long long paravirt_sched_clock(void)
 	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
 }
 
-struct jump_label_key;
-extern struct jump_label_key paravirt_steal_enabled;
-extern struct jump_label_key paravirt_steal_rq_enabled;
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
 
 static inline u64 paravirt_steal_clock(int cpu)
 {
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index f0c6fd6f176b..694d801bf606 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -438,9 +438,9 @@ void __init kvm_guest_init(void)
 static __init int activate_jump_labels(void)
 {
 	if (has_steal_clock) {
-		jump_label_inc(&paravirt_steal_enabled);
+		static_key_slow_inc(&paravirt_steal_enabled);
 		if (steal_acc)
-			jump_label_inc(&paravirt_steal_rq_enabled);
+			static_key_slow_inc(&paravirt_steal_rq_enabled);
 	}
 
 	return 0;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index d90272e6bc40..ada2f99388dd 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -202,8 +202,8 @@ static void native_flush_tlb_single(unsigned long addr)
 	__native_flush_tlb_single(addr);
 }
 
-struct jump_label_key paravirt_steal_enabled;
-struct jump_label_key paravirt_steal_rq_enabled;
+struct static_key paravirt_steal_enabled;
+struct static_key paravirt_steal_rq_enabled;
 
 static u64 native_steal_clock(int cpu)
 {
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index fe15dcc07a6b..ea7b4fd34676 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -234,7 +234,7 @@ static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
 }
 
 static bool mmu_audit;
-static struct jump_label_key mmu_audit_key;
+static struct static_key mmu_audit_key;
 
 static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
 {
@@ -250,7 +250,7 @@ static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
 
 static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
 {
-	if (static_branch((&mmu_audit_key)))
+	if (static_key_false((&mmu_audit_key)))
 		__kvm_mmu_audit(vcpu, point);
 }
 
@@ -259,7 +259,7 @@ static void mmu_audit_enable(void)
 	if (mmu_audit)
 		return;
 
-	jump_label_inc(&mmu_audit_key);
+	static_key_slow_inc(&mmu_audit_key);
 	mmu_audit = true;
 }
 
@@ -268,7 +268,7 @@ static void mmu_audit_disable(void)
 	if (!mmu_audit)
 		return;
 
-	jump_label_dec(&mmu_audit_key);
+	static_key_slow_dec(&mmu_audit_key);
 	mmu_audit = false;
 }
 
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f7c69580fea7..2172da2d9bb4 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -9,15 +9,15 @@
  *
  * Jump labels provide an interface to generate dynamic branches using
  * self-modifying code. Assuming toolchain and architecture support the result
- * of a "if (static_branch(&key))" statement is a unconditional branch (which
+ * of a "if (static_key_false(&key))" statement is a unconditional branch (which
  * defaults to false - and the true block is placed out of line).
  *
- * However at runtime we can change the 'static' branch target using
- * jump_label_{inc,dec}(). These function as a 'reference' count on the key
+ * However at runtime we can change the branch target using
+ * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
  * object and for as long as there are references all branches referring to
  * that particular key will point to the (out of line) true block.
  *
- * Since this relies on modifying code the jump_label_{inc,dec}() functions
+ * Since this relies on modifying code the static_key_slow_{inc,dec}() functions
  * must be considered absolute slow paths (machine wide synchronization etc.).
  * OTOH, since the affected branches are unconditional their runtime overhead
  * will be absolutely minimal, esp. in the default (off) case where the total
@@ -26,12 +26,26 @@
  *
  * When the control is directly exposed to userspace it is prudent to delay the
  * decrement to avoid high frequency code modifications which can (and do)
- * cause significant performance degradation. Struct jump_label_key_deferred and
- * jump_label_dec_deferred() provide for this.
+ * cause significant performance degradation. Struct static_key_deferred and
+ * static_key_slow_dec_deferred() provide for this.
  *
  * Lacking toolchain and or architecture support, it falls back to a simple
  * conditional branch.
- */
+ *
+ * struct static_key my_key = STATIC_KEY_INIT_TRUE;
+ *
+ *   if (static_key_true(&my_key)) {
+ *   }
+ *
+ * will result in the true case being in-line and will start the key with a single
+ * reference. Mixing static_key_true() and static_key_false() on the same key is not
+ * allowed.
+ *
+ * Not initializing the key (static data is initialized to 0s anyway) is the
+ * same as using STATIC_KEY_INIT_FALSE, and static_key_false() is
+ * equivalent to static_branch().
+ *
+*/
 
 #include <linux/types.h>
 #include <linux/compiler.h>
@@ -39,16 +53,17 @@
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
 
-struct jump_label_key {
+struct static_key {
 	atomic_t enabled;
+/* Set lsb bit to 1 if branch is default true, 0 otherwise */
 	struct jump_entry *entries;
 #ifdef CONFIG_MODULES
-	struct jump_label_mod *next;
+	struct static_key_mod *next;
 #endif
 };
 
-struct jump_label_key_deferred {
-	struct jump_label_key key;
+struct static_key_deferred {
+	struct static_key key;
 	unsigned long timeout;
 	struct delayed_work work;
 };
@@ -66,13 +81,34 @@ struct module;
 
 #ifdef HAVE_JUMP_LABEL
 
-#ifdef CONFIG_MODULES
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL, NULL}
-#else
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL}
-#endif
+#define JUMP_LABEL_TRUE_BRANCH 1UL
+
+static
+inline struct jump_entry *jump_label_get_entries(struct static_key *key)
+{
+	return (struct jump_entry *)((unsigned long)key->entries
+						& ~JUMP_LABEL_TRUE_BRANCH);
+}
+
+static inline bool jump_label_get_branch_default(struct static_key *key)
+{
+	if ((unsigned long)key->entries & JUMP_LABEL_TRUE_BRANCH)
+		return true;
+	return false;
+}
+
+static __always_inline bool static_key_false(struct static_key *key)
+{
+	return arch_static_branch(key);
+}
 
-static __always_inline bool static_branch(struct jump_label_key *key)
+static __always_inline bool static_key_true(struct static_key *key)
+{
+	return !static_key_false(key);
+}
+
+/* Deprecated. Please use 'static_key_false()' instead. */
+static __always_inline bool static_branch(struct static_key *key)
 {
 	return arch_static_branch(key);
 }
@@ -88,21 +124,24 @@ extern void arch_jump_label_transform(struct jump_entry *entry,
 extern void arch_jump_label_transform_static(struct jump_entry *entry,
 					     enum jump_label_type type);
 extern int jump_label_text_reserved(void *start, void *end);
-extern void jump_label_inc(struct jump_label_key *key);
-extern void jump_label_dec(struct jump_label_key *key);
-extern void jump_label_dec_deferred(struct jump_label_key_deferred *key);
-extern bool jump_label_enabled(struct jump_label_key *key);
+extern void static_key_slow_inc(struct static_key *key);
+extern void static_key_slow_dec(struct static_key *key);
+extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern bool static_key_enabled(struct static_key *key);
 extern void jump_label_apply_nops(struct module *mod);
-extern void jump_label_rate_limit(struct jump_label_key_deferred *key,
-		unsigned long rl);
+extern void
+jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+	{ .enabled = ATOMIC_INIT(1), .entries = (void *)1 })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+	{ .enabled = ATOMIC_INIT(0), .entries = (void *)0 })
 
 #else  /* !HAVE_JUMP_LABEL */
 
 #include <linux/atomic.h>
 
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0)}
-
-struct jump_label_key {
+struct static_key {
 	atomic_t enabled;
 };
 
@@ -110,30 +149,45 @@ static __always_inline void jump_label_init(void)
 {
 }
 
-struct jump_label_key_deferred {
-	struct jump_label_key  key;
+struct static_key_deferred {
+	struct static_key  key;
 };
 
-static __always_inline bool static_branch(struct jump_label_key *key)
+static __always_inline bool static_key_false(struct static_key *key)
+{
+	if (unlikely(atomic_read(&key->enabled)) > 0)
+		return true;
+	return false;
+}
+
+static __always_inline bool static_key_true(struct static_key *key)
 {
-	if (unlikely(atomic_read(&key->enabled)))
+	if (likely(atomic_read(&key->enabled)) > 0)
 		return true;
 	return false;
 }
 
-static inline void jump_label_inc(struct jump_label_key *key)
+/* Deprecated. Please use 'static_key_false()' instead. */
+static __always_inline bool static_branch(struct static_key *key)
+{
+	if (unlikely(atomic_read(&key->enabled)) > 0)
+		return true;
+	return false;
+}
+
+static inline void static_key_slow_inc(struct static_key *key)
 {
 	atomic_inc(&key->enabled);
 }
 
-static inline void jump_label_dec(struct jump_label_key *key)
+static inline void static_key_slow_dec(struct static_key *key)
 {
 	atomic_dec(&key->enabled);
 }
 
-static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
-	jump_label_dec(&key->key);
+	static_key_slow_dec(&key->key);
 }
 
 static inline int jump_label_text_reserved(void *start, void *end)
@@ -144,9 +198,9 @@ static inline int jump_label_text_reserved(void *start, void *end)
 static inline void jump_label_lock(void) {}
 static inline void jump_label_unlock(void) {}
 
-static inline bool jump_label_enabled(struct jump_label_key *key)
+static inline bool static_key_enabled(struct static_key *key)
 {
-	return !!atomic_read(&key->enabled);
+	return (atomic_read(&key->enabled) > 0);
 }
 
 static inline int jump_label_apply_nops(struct module *mod)
@@ -154,13 +208,20 @@ static inline int jump_label_apply_nops(struct module *mod)
 	return 0;
 }
 
-static inline void jump_label_rate_limit(struct jump_label_key_deferred *key,
+static inline void
+jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
 {
 }
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+		{ .enabled = ATOMIC_INIT(1) })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+		{ .enabled = ATOMIC_INIT(0) })
+
 #endif	/* HAVE_JUMP_LABEL */
 
-#define jump_label_key_enabled	((struct jump_label_key){ .enabled = ATOMIC_INIT(1), })
-#define jump_label_key_disabled	((struct jump_label_key){ .enabled = ATOMIC_INIT(0), })
+#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#define jump_label_enabled static_key_enabled
 
 #endif	/* _LINUX_JUMP_LABEL_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0eac07c95255..7dfaae7846ab 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -214,8 +214,8 @@ enum {
 #include <linux/skbuff.h>
 
 #ifdef CONFIG_RPS
-#include <linux/jump_label.h>
-extern struct jump_label_key rps_needed;
+#include <linux/static_key.h>
+extern struct static_key rps_needed;
 #endif
 
 struct neighbour;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index b809265607d0..29734be334c1 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -163,13 +163,13 @@ extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
 extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
 #if defined(CONFIG_JUMP_LABEL)
-#include <linux/jump_label.h>
-extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+#include <linux/static_key.h>
+extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
 {
 	if (__builtin_constant_p(pf) &&
 	    __builtin_constant_p(hook))
-		return static_branch(&nf_hooks_needed[pf][hook]);
+		return static_key_false(&nf_hooks_needed[pf][hook]);
 
 	return !list_empty(&nf_hooks[pf][hook]);
 }
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 412b790f5da6..0d21e6f1cf53 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -514,7 +514,7 @@ struct perf_guest_info_callbacks {
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/irq_work.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <linux/atomic.h>
 #include <asm/local.h>
 
@@ -1038,7 +1038,7 @@ static inline int is_software_event(struct perf_event *event)
 	return event->pmu->task_ctx_nr == perf_sw_context;
 }
 
-extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
@@ -1066,7 +1066,7 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
 	struct pt_regs hot_regs;
 
-	if (static_branch(&perf_swevent_enabled[event_id])) {
+	if (static_key_false(&perf_swevent_enabled[event_id])) {
 		if (!regs) {
 			perf_fetch_caller_regs(&hot_regs);
 			regs = &hot_regs;
@@ -1075,12 +1075,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 	}
 }
 
-extern struct jump_label_key_deferred perf_sched_events;
+extern struct static_key_deferred perf_sched_events;
 
 static inline void perf_event_task_sched_in(struct task_struct *prev,
 					    struct task_struct *task)
 {
-	if (static_branch(&perf_sched_events.key))
+	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_in(prev, task);
 }
 
@@ -1089,7 +1089,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
 {
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
-	if (static_branch(&perf_sched_events.key))
+	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_out(prev, next);
 }
 
diff --git a/include/linux/static_key.h b/include/linux/static_key.h
new file mode 100644
index 000000000000..27bd3f8a0857
--- /dev/null
+++ b/include/linux/static_key.h
@@ -0,0 +1 @@
+#include <linux/jump_label.h>
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index fc36da97ff7e..bd96ecd0e05c 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -17,7 +17,7 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/rcupdate.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 struct module;
 struct tracepoint;
@@ -29,7 +29,7 @@ struct tracepoint_func {
 
 struct tracepoint {
 	const char *name;		/* Tracepoint name */
-	struct jump_label_key key;
+	struct static_key key;
 	void (*regfunc)(void);
 	void (*unregfunc)(void);
 	struct tracepoint_func __rcu *funcs;
@@ -145,7 +145,7 @@ static inline void tracepoint_synchronize_unregister(void)
 	extern struct tracepoint __tracepoint_##name;			\
 	static inline void trace_##name(proto)				\
 	{								\
-		if (static_branch(&__tracepoint_##name.key))		\
+		if (static_key_false(&__tracepoint_##name.key))		\
 			__DO_TRACE(&__tracepoint_##name,		\
 				TP_PROTO(data_proto),			\
 				TP_ARGS(data_args),			\
@@ -188,7 +188,7 @@ static inline void tracepoint_synchronize_unregister(void)
 	__attribute__((section("__tracepoints_strings"))) = #name;	 \
 	struct tracepoint __tracepoint_##name				 \
 	__attribute__((section("__tracepoints"))) =			 \
-		{ __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\
+		{ __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
 	static struct tracepoint * const __tracepoint_ptr_##name __used	 \
 	__attribute__((section("__tracepoints_ptrs"))) =		 \
 		&__tracepoint_##name;
diff --git a/include/net/sock.h b/include/net/sock.h
index 91c1c8baf020..dcde2d9268cd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -55,7 +55,7 @@
 #include <linux/uaccess.h>
 #include <linux/memcontrol.h>
 #include <linux/res_counter.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
@@ -924,13 +924,13 @@ inline void sk_refcnt_debug_release(const struct sock *sk)
 #endif /* SOCK_REFCNT_DEBUG */
 
 #if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET)
-extern struct jump_label_key memcg_socket_limit_enabled;
+extern struct static_key memcg_socket_limit_enabled;
 static inline struct cg_proto *parent_cg_proto(struct proto *proto,
 					       struct cg_proto *cg_proto)
 {
 	return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
 }
-#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
+#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled)
 #else
 #define mem_cgroup_sockets_enabled 0
 static inline struct cg_proto *parent_cg_proto(struct proto *proto,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7c3b9de55f6b..5e0f8bb89b2b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -128,7 +128,7 @@ enum event_type_t {
  * perf_sched_events : >0 events exist
  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */
-struct jump_label_key_deferred perf_sched_events __read_mostly;
+struct static_key_deferred perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 
 static atomic_t nr_mmap_events __read_mostly;
@@ -2769,7 +2769,7 @@ static void free_event(struct perf_event *event)
 
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
-			jump_label_dec_deferred(&perf_sched_events);
+			static_key_slow_dec_deferred(&perf_sched_events);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_dec(&nr_mmap_events);
 		if (event->attr.comm)
@@ -2780,7 +2780,7 @@ static void free_event(struct perf_event *event)
 			put_callchain_buffers();
 		if (is_cgroup_event(event)) {
 			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
-			jump_label_dec_deferred(&perf_sched_events);
+			static_key_slow_dec_deferred(&perf_sched_events);
 		}
 	}
 
@@ -4982,7 +4982,7 @@ static int swevent_hlist_get(struct perf_event *event)
 	return err;
 }
 
-struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 static void sw_perf_event_destroy(struct perf_event *event)
 {
@@ -4990,7 +4990,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
 
 	WARN_ON(event->parent);
 
-	jump_label_dec(&perf_swevent_enabled[event_id]);
+	static_key_slow_dec(&perf_swevent_enabled[event_id]);
 	swevent_hlist_put(event);
 }
 
@@ -5020,7 +5020,7 @@ static int perf_swevent_init(struct perf_event *event)
 		if (err)
 			return err;
 
-		jump_label_inc(&perf_swevent_enabled[event_id]);
+		static_key_slow_inc(&perf_swevent_enabled[event_id]);
 		event->destroy = sw_perf_event_destroy;
 	}
 
@@ -5843,7 +5843,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
-			jump_label_inc(&perf_sched_events.key);
+			static_key_slow_inc(&perf_sched_events.key);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_inc(&nr_mmap_events);
 		if (event->attr.comm)
@@ -6081,7 +6081,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		 * - that may need work on context switch
 		 */
 		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
-		jump_label_inc(&perf_sched_events.key);
+		static_key_slow_inc(&perf_sched_events.key);
 	}
 
 	/*
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 543782e7cdd2..bf9dcadbb53a 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -12,7 +12,7 @@
 #include <linux/slab.h>
 #include <linux/sort.h>
 #include <linux/err.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 #ifdef HAVE_JUMP_LABEL
 
@@ -29,10 +29,11 @@ void jump_label_unlock(void)
 	mutex_unlock(&jump_label_mutex);
 }
 
-bool jump_label_enabled(struct jump_label_key *key)
+bool static_key_enabled(struct static_key *key)
 {
-	return !!atomic_read(&key->enabled);
+	return (atomic_read(&key->enabled) > 0);
 }
+EXPORT_SYMBOL_GPL(static_key_enabled);
 
 static int jump_label_cmp(const void *a, const void *b)
 {
@@ -58,22 +59,26 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
 	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
 }
 
-static void jump_label_update(struct jump_label_key *key, int enable);
+static void jump_label_update(struct static_key *key, int enable);
 
-void jump_label_inc(struct jump_label_key *key)
+void static_key_slow_inc(struct static_key *key)
 {
 	if (atomic_inc_not_zero(&key->enabled))
 		return;
 
 	jump_label_lock();
-	if (atomic_read(&key->enabled) == 0)
-		jump_label_update(key, JUMP_LABEL_ENABLE);
+	if (atomic_read(&key->enabled) == 0) {
+		if (!jump_label_get_branch_default(key))
+			jump_label_update(key, JUMP_LABEL_ENABLE);
+		else
+			jump_label_update(key, JUMP_LABEL_DISABLE);
+	}
 	atomic_inc(&key->enabled);
 	jump_label_unlock();
 }
-EXPORT_SYMBOL_GPL(jump_label_inc);
+EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
-static void __jump_label_dec(struct jump_label_key *key,
+static void __static_key_slow_dec(struct static_key *key,
 		unsigned long rate_limit, struct delayed_work *work)
 {
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
@@ -85,32 +90,35 @@ static void __jump_label_dec(struct jump_label_key *key,
 	if (rate_limit) {
 		atomic_inc(&key->enabled);
 		schedule_delayed_work(work, rate_limit);
-	} else
-		jump_label_update(key, JUMP_LABEL_DISABLE);
-
+	} else {
+		if (!jump_label_get_branch_default(key))
+			jump_label_update(key, JUMP_LABEL_DISABLE);
+		else
+			jump_label_update(key, JUMP_LABEL_ENABLE);
+	}
 	jump_label_unlock();
 }
-EXPORT_SYMBOL_GPL(jump_label_dec);
 
 static void jump_label_update_timeout(struct work_struct *work)
 {
-	struct jump_label_key_deferred *key =
-		container_of(work, struct jump_label_key_deferred, work.work);
-	__jump_label_dec(&key->key, 0, NULL);
+	struct static_key_deferred *key =
+		container_of(work, struct static_key_deferred, work.work);
+	__static_key_slow_dec(&key->key, 0, NULL);
 }
 
-void jump_label_dec(struct jump_label_key *key)
+void static_key_slow_dec(struct static_key *key)
 {
-	__jump_label_dec(key, 0, NULL);
+	__static_key_slow_dec(key, 0, NULL);
 }
+EXPORT_SYMBOL_GPL(static_key_slow_dec);
 
-void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
-	__jump_label_dec(&key->key, key->timeout, &key->work);
+	__static_key_slow_dec(&key->key, key->timeout, &key->work);
 }
+EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
 
-
-void jump_label_rate_limit(struct jump_label_key_deferred *key,
+void jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
 {
 	key->timeout = rl;
@@ -153,7 +161,7 @@ void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry
 	arch_jump_label_transform(entry, type);	
 }
 
-static void __jump_label_update(struct jump_label_key *key,
+static void __jump_label_update(struct static_key *key,
 				struct jump_entry *entry,
 				struct jump_entry *stop, int enable)
 {
@@ -170,27 +178,40 @@ static void __jump_label_update(struct jump_label_key *key,
 	}
 }
 
+static enum jump_label_type jump_label_type(struct static_key *key)
+{
+	bool true_branch = jump_label_get_branch_default(key);
+	bool state = static_key_enabled(key);
+
+	if ((!true_branch && state) || (true_branch && !state))
+		return JUMP_LABEL_ENABLE;
+
+	return JUMP_LABEL_DISABLE;
+}
+
 void __init jump_label_init(void)
 {
 	struct jump_entry *iter_start = __start___jump_table;
 	struct jump_entry *iter_stop = __stop___jump_table;
-	struct jump_label_key *key = NULL;
+	struct static_key *key = NULL;
 	struct jump_entry *iter;
 
 	jump_label_lock();
 	jump_label_sort_entries(iter_start, iter_stop);
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
-		struct jump_label_key *iterk;
+		struct static_key *iterk;
 
-		iterk = (struct jump_label_key *)(unsigned long)iter->key;
-		arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
-						 JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
+		iterk = (struct static_key *)(unsigned long)iter->key;
+		arch_jump_label_transform_static(iter, jump_label_type(iterk));
 		if (iterk == key)
 			continue;
 
 		key = iterk;
-		key->entries = iter;
+		/*
+		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
+		 */
+		*((unsigned long *)&key->entries) += (unsigned long)iter;
 #ifdef CONFIG_MODULES
 		key->next = NULL;
 #endif
@@ -200,8 +221,8 @@ void __init jump_label_init(void)
 
 #ifdef CONFIG_MODULES
 
-struct jump_label_mod {
-	struct jump_label_mod *next;
+struct static_key_mod {
+	struct static_key_mod *next;
 	struct jump_entry *entries;
 	struct module *mod;
 };
@@ -221,9 +242,9 @@ static int __jump_label_mod_text_reserved(void *start, void *end)
 				start, end);
 }
 
-static void __jump_label_mod_update(struct jump_label_key *key, int enable)
+static void __jump_label_mod_update(struct static_key *key, int enable)
 {
-	struct jump_label_mod *mod = key->next;
+	struct static_key_mod *mod = key->next;
 
 	while (mod) {
 		struct module *m = mod->mod;
@@ -254,11 +275,7 @@ void jump_label_apply_nops(struct module *mod)
 		return;
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
-		struct jump_label_key *iterk;
-
-		iterk = (struct jump_label_key *)(unsigned long)iter->key;
-		arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
-				JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
+		arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
 	}
 }
 
@@ -267,8 +284,8 @@ static int jump_label_add_module(struct module *mod)
 	struct jump_entry *iter_start = mod->jump_entries;
 	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
 	struct jump_entry *iter;
-	struct jump_label_key *key = NULL;
-	struct jump_label_mod *jlm;
+	struct static_key *key = NULL;
+	struct static_key_mod *jlm;
 
 	/* if the module doesn't have jump label entries, just return */
 	if (iter_start == iter_stop)
@@ -277,28 +294,30 @@ static int jump_label_add_module(struct module *mod)
 	jump_label_sort_entries(iter_start, iter_stop);
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
-		if (iter->key == (jump_label_t)(unsigned long)key)
-			continue;
+		struct static_key *iterk;
 
-		key = (struct jump_label_key *)(unsigned long)iter->key;
+		iterk = (struct static_key *)(unsigned long)iter->key;
+		if (iterk == key)
+			continue;
 
+		key = iterk;
 		if (__module_address(iter->key) == mod) {
-			atomic_set(&key->enabled, 0);
-			key->entries = iter;
+			/*
+			 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
+			 */
+			*((unsigned long *)&key->entries) += (unsigned long)iter;
 			key->next = NULL;
 			continue;
 		}
-
-		jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL);
+		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
 		if (!jlm)
 			return -ENOMEM;
-
 		jlm->mod = mod;
 		jlm->entries = iter;
 		jlm->next = key->next;
 		key->next = jlm;
 
-		if (jump_label_enabled(key))
+		if (jump_label_type(key) == JUMP_LABEL_ENABLE)
 			__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
 	}
 
@@ -310,14 +329,14 @@ static void jump_label_del_module(struct module *mod)
 	struct jump_entry *iter_start = mod->jump_entries;
 	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
 	struct jump_entry *iter;
-	struct jump_label_key *key = NULL;
-	struct jump_label_mod *jlm, **prev;
+	struct static_key *key = NULL;
+	struct static_key_mod *jlm, **prev;
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
 		if (iter->key == (jump_label_t)(unsigned long)key)
 			continue;
 
-		key = (struct jump_label_key *)(unsigned long)iter->key;
+		key = (struct static_key *)(unsigned long)iter->key;
 
 		if (__module_address(iter->key) == mod)
 			continue;
@@ -419,9 +438,10 @@ int jump_label_text_reserved(void *start, void *end)
 	return ret;
 }
 
-static void jump_label_update(struct jump_label_key *key, int enable)
+static void jump_label_update(struct static_key *key, int enable)
 {
-	struct jump_entry *entry = key->entries, *stop = __stop___jump_table;
+	struct jump_entry *stop = __stop___jump_table;
+	struct jump_entry *entry = jump_label_get_entries(key);
 
 #ifdef CONFIG_MODULES
 	struct module *mod = __module_address((unsigned long)key);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5255c9d2e053..112c6824476b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -162,13 +162,13 @@ static int sched_feat_show(struct seq_file *m, void *v)
 
 #ifdef HAVE_JUMP_LABEL
 
-#define jump_label_key__true  jump_label_key_enabled
-#define jump_label_key__false jump_label_key_disabled
+#define jump_label_key__true  STATIC_KEY_INIT_TRUE
+#define jump_label_key__false STATIC_KEY_INIT_FALSE
 
 #define SCHED_FEAT(name, enabled)	\
 	jump_label_key__##enabled ,
 
-struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
+struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 #include "features.h"
 };
 
@@ -176,14 +176,14 @@ struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
 
 static void sched_feat_disable(int i)
 {
-	if (jump_label_enabled(&sched_feat_keys[i]))
-		jump_label_dec(&sched_feat_keys[i]);
+	if (static_key_enabled(&sched_feat_keys[i]))
+		static_key_slow_dec(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-	if (!jump_label_enabled(&sched_feat_keys[i]))
-		jump_label_inc(&sched_feat_keys[i]);
+	if (!static_key_enabled(&sched_feat_keys[i]))
+		static_key_slow_inc(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };
@@ -894,7 +894,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 	delta -= irq_delta;
 #endif
 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-	if (static_branch((&paravirt_steal_rq_enabled))) {
+	if (static_key_false((&paravirt_steal_rq_enabled))) {
 		u64 st;
 
 		steal = paravirt_steal_clock(cpu_of(rq));
@@ -2756,7 +2756,7 @@ void account_idle_time(cputime_t cputime)
 static __always_inline bool steal_account_process_tick(void)
 {
 #ifdef CONFIG_PARAVIRT
-	if (static_branch(&paravirt_steal_enabled)) {
+	if (static_key_false(&paravirt_steal_enabled)) {
 		u64 steal, st = 0;
 
 		steal = paravirt_steal_clock(smp_processor_id());
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7c6414fc669d..423547ada38a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1399,20 +1399,20 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 #ifdef CONFIG_CFS_BANDWIDTH
 
 #ifdef HAVE_JUMP_LABEL
-static struct jump_label_key __cfs_bandwidth_used;
+static struct static_key __cfs_bandwidth_used;
 
 static inline bool cfs_bandwidth_used(void)
 {
-	return static_branch(&__cfs_bandwidth_used);
+	return static_key_false(&__cfs_bandwidth_used);
 }
 
 void account_cfs_bandwidth_used(int enabled, int was_enabled)
 {
 	/* only need to count groups transitioning between enabled/!enabled */
 	if (enabled && !was_enabled)
-		jump_label_inc(&__cfs_bandwidth_used);
+		static_key_slow_inc(&__cfs_bandwidth_used);
 	else if (!enabled && was_enabled)
-		jump_label_dec(&__cfs_bandwidth_used);
+		static_key_slow_dec(&__cfs_bandwidth_used);
 }
 #else /* HAVE_JUMP_LABEL */
 static bool cfs_bandwidth_used(void)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 98c0c2623db8..b4cd6d8ea150 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -611,7 +611,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
 #ifdef CONFIG_SCHED_DEBUG
-# include <linux/jump_label.h>
+# include <linux/static_key.h>
 # define const_debug __read_mostly
 #else
 # define const_debug const
@@ -630,18 +630,18 @@ enum {
 #undef SCHED_FEAT
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
-static __always_inline bool static_branch__true(struct jump_label_key *key)
+static __always_inline bool static_branch__true(struct static_key *key)
 {
-	return likely(static_branch(key)); /* Not out of line branch. */
+	return static_key_true(key); /* Not out of line branch. */
 }
 
-static __always_inline bool static_branch__false(struct jump_label_key *key)
+static __always_inline bool static_branch__false(struct static_key *key)
 {
-	return unlikely(static_branch(key)); /* Out of line branch. */
+	return static_key_false(key); /* Out of line branch. */
 }
 
 #define SCHED_FEAT(name, enabled)					\
-static __always_inline bool static_branch_##name(struct jump_label_key *key) \
+static __always_inline bool static_branch_##name(struct static_key *key) \
 {									\
 	return static_branch__##enabled(key);				\
 }
@@ -650,7 +650,7 @@ static __always_inline bool static_branch_##name(struct jump_label_key *key) \
 
 #undef SCHED_FEAT
 
-extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR];
+extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
 #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index f1539decd99d..d96ba22dabfa 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -25,7 +25,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 extern struct tracepoint * const __start___tracepoints_ptrs[];
 extern struct tracepoint * const __stop___tracepoints_ptrs[];
@@ -256,9 +256,9 @@ static void set_tracepoint(struct tracepoint_entry **entry,
 {
 	WARN_ON(strcmp((*entry)->name, elem->name) != 0);
 
-	if (elem->regfunc && !jump_label_enabled(&elem->key) && active)
+	if (elem->regfunc && !static_key_enabled(&elem->key) && active)
 		elem->regfunc();
-	else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active)
+	else if (elem->unregfunc && static_key_enabled(&elem->key) && !active)
 		elem->unregfunc();
 
 	/*
@@ -269,10 +269,10 @@ static void set_tracepoint(struct tracepoint_entry **entry,
 	 * is used.
 	 */
 	rcu_assign_pointer(elem->funcs, (*entry)->funcs);
-	if (active && !jump_label_enabled(&elem->key))
-		jump_label_inc(&elem->key);
-	else if (!active && jump_label_enabled(&elem->key))
-		jump_label_dec(&elem->key);
+	if (active && !static_key_enabled(&elem->key))
+		static_key_slow_inc(&elem->key);
+	else if (!active && static_key_enabled(&elem->key))
+		static_key_slow_dec(&elem->key);
 }
 
 /*
@@ -283,11 +283,11 @@ static void set_tracepoint(struct tracepoint_entry **entry,
  */
 static void disable_tracepoint(struct tracepoint *elem)
 {
-	if (elem->unregfunc && jump_label_enabled(&elem->key))
+	if (elem->unregfunc && static_key_enabled(&elem->key))
 		elem->unregfunc();
 
-	if (jump_label_enabled(&elem->key))
-		jump_label_dec(&elem->key);
+	if (static_key_enabled(&elem->key))
+		static_key_slow_dec(&elem->key);
 	rcu_assign_pointer(elem->funcs, NULL);
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 115dee1d985d..da7ce7f0e566 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -134,7 +134,7 @@
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
 #include <linux/net_tstamp.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <net/flow_keys.h>
 
 #include "net-sysfs.h"
@@ -1441,11 +1441,11 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
-static struct jump_label_key netstamp_needed __read_mostly;
+static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call jump_label_dec() from irq context
+/* We are not allowed to call static_key_slow_dec() from irq context
  * If net_disable_timestamp() is called from irq context, defer the
- * jump_label_dec() calls.
+ * static_key_slow_dec() calls.
  */
 static atomic_t netstamp_needed_deferred;
 #endif
@@ -1457,12 +1457,12 @@ void net_enable_timestamp(void)
 
 	if (deferred) {
 		while (--deferred)
-			jump_label_dec(&netstamp_needed);
+			static_key_slow_dec(&netstamp_needed);
 		return;
 	}
 #endif
 	WARN_ON(in_interrupt());
-	jump_label_inc(&netstamp_needed);
+	static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
 
@@ -1474,19 +1474,19 @@ void net_disable_timestamp(void)
 		return;
 	}
 #endif
-	jump_label_dec(&netstamp_needed);
+	static_key_slow_dec(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
 	skb->tstamp.tv64 = 0;
-	if (static_branch(&netstamp_needed))
+	if (static_key_false(&netstamp_needed))
 		__net_timestamp(skb);
 }
 
 #define net_timestamp_check(COND, SKB)			\
-	if (static_branch(&netstamp_needed)) {		\
+	if (static_key_false(&netstamp_needed)) {		\
 		if ((COND) && !(SKB)->tstamp.tv64)	\
 			__net_timestamp(SKB);		\
 	}						\
@@ -2660,7 +2660,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
-struct jump_label_key rps_needed __read_mostly;
+struct static_key rps_needed __read_mostly;
 
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
@@ -2945,7 +2945,7 @@ int netif_rx(struct sk_buff *skb)
 
 	trace_netif_rx(skb);
 #ifdef CONFIG_RPS
-	if (static_branch(&rps_needed))	{
+	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu;
 
@@ -3309,7 +3309,7 @@ int netif_receive_skb(struct sk_buff *skb)
 		return NET_RX_SUCCESS;
 
 #ifdef CONFIG_RPS
-	if (static_branch(&rps_needed)) {
+	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu, ret;
 
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index a1727cda03d7..495586232aa1 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -608,10 +608,10 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 	spin_unlock(&rps_map_lock);
 
 	if (map)
-		jump_label_inc(&rps_needed);
+		static_key_slow_inc(&rps_needed);
 	if (old_map) {
 		kfree_rcu(old_map, rcu);
-		jump_label_dec(&rps_needed);
+		static_key_slow_dec(&rps_needed);
 	}
 	free_cpumask_var(mask);
 	return len;
diff --git a/net/core/sock.c b/net/core/sock.c
index 3e81fd2e3c75..3a4e5817a2a7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -111,7 +111,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/user_namespace.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <linux/memcontrol.h>
 
 #include <asm/uaccess.h>
@@ -184,7 +184,7 @@ void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
 static struct lock_class_key af_family_keys[AF_MAX];
 static struct lock_class_key af_family_slock_keys[AF_MAX];
 
-struct jump_label_key memcg_socket_limit_enabled;
+struct static_key memcg_socket_limit_enabled;
 EXPORT_SYMBOL(memcg_socket_limit_enabled);
 
 /*
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index d05559d4d9cd..0c2850874254 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -69,9 +69,9 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
 		if (sock_table != orig_sock_table) {
 			rcu_assign_pointer(rps_sock_flow_table, sock_table);
 			if (sock_table)
-				jump_label_inc(&rps_needed);
+				static_key_slow_inc(&rps_needed);
 			if (orig_sock_table) {
-				jump_label_dec(&rps_needed);
+				static_key_slow_dec(&rps_needed);
 				synchronize_rcu();
 				vfree(orig_sock_table);
 			}
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 49978788a9dc..602fb305365f 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -111,7 +111,7 @@ void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
 	val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
 
 	if (val != RESOURCE_MAX)
-		jump_label_dec(&memcg_socket_limit_enabled);
+		static_key_slow_dec(&memcg_socket_limit_enabled);
 }
 EXPORT_SYMBOL(tcp_destroy_cgroup);
 
@@ -143,9 +143,9 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
 					     net->ipv4.sysctl_tcp_mem[i]);
 
 	if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
-		jump_label_dec(&memcg_socket_limit_enabled);
+		static_key_slow_dec(&memcg_socket_limit_enabled);
 	else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
-		jump_label_inc(&memcg_socket_limit_enabled);
+		static_key_slow_inc(&memcg_socket_limit_enabled);
 
 	return 0;
 }
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index b4e8ff05b301..e1b7e051332e 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -56,7 +56,7 @@ struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
 EXPORT_SYMBOL(nf_hooks);
 
 #if defined(CONFIG_JUMP_LABEL)
-struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 EXPORT_SYMBOL(nf_hooks_needed);
 #endif
 
@@ -77,7 +77,7 @@ int nf_register_hook(struct nf_hook_ops *reg)
 	list_add_rcu(&reg->list, elem->list.prev);
 	mutex_unlock(&nf_hook_mutex);
 #if defined(CONFIG_JUMP_LABEL)
-	jump_label_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
+	static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
 	return 0;
 }
@@ -89,7 +89,7 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
 	list_del_rcu(&reg->list);
 	mutex_unlock(&nf_hook_mutex);
 #if defined(CONFIG_JUMP_LABEL)
-	jump_label_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
+	static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
 	synchronize_net();
 }
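
The same patch also carries the deferred variant over to the new
naming: static_key_slow_dec_deferred() together with
jump_label_rate_limit() batches the expensive disable-side code
patching (perf uses this for perf_sched_events). A sketch, with the
my_events_*() names being hypothetical placeholders:

        #include <linux/init.h>
        #include <linux/static_key.h>

        static struct static_key_deferred my_events_key;

        static int __init my_events_init(void)
        {
                /* Defer the actual unpatching by up to a second. */
                jump_label_rate_limit(&my_events_key, HZ);
                return 0;
        }

        void my_event_create(void)
        {
                static_key_slow_inc(&my_events_key.key);
        }

        void my_event_destroy(void)
        {
                /* Batched: avoids patch storms when userspace toggles fast. */
                static_key_slow_dec_deferred(&my_events_key);
        }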

commit 10ea9d6f5663c3ed51f91cd037b4b9b147273cbb
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Feb 22 13:19:37 2012 +0100

    clocksource: scx200_hrt: Fix the build
    
    This commit:
    
      12d6d41276de: clocksource: scx200_hrt: Convert scx200 to use clocksource_register_hz
    
    breaks the build on x86-32:
    
      drivers/clocksource/scx200_hrt.c: In function ‘init_hrt_clocksource’:
      drivers/clocksource/scx200_hrt.c:95:0: error: unterminated argument list invoking macro "pr_info"
      drivers/clocksource/scx200_hrt.c:84:2: error: ‘pr_info’ undeclared (first use in this function)
    
    It could not possibly have been build-tested, because it had this mismerge:
    
            pr_info("enabling scx200 high-res timer (%s MHz +%d ppm)\n",
            printk(KERN_INFO "enabling scx200 high-res timer (%s MHz +%d ppm)\n",
                    mhz27 ? "27":"1", ppm);
    
    Cc: Jim Cromie <jim.cromie@gmail.com>
    Cc: John Stultz <john.stultz@linaro.org>
    Link: http://lkml.kernel.org/n/tip-jceb26fns5w7tv8edlivhxpa@git.kernel.org
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c
index 60db8b1492f1..64f9e8294434 100644
--- a/drivers/clocksource/scx200_hrt.c
+++ b/drivers/clocksource/scx200_hrt.c
@@ -81,9 +81,7 @@ static int __init init_hrt_clocksource(void)
 	if (mhz27)
 		freq *= 27;
 
-	pr_info("enabling scx200 high-res timer (%s MHz +%d ppm)\n",
-	printk(KERN_INFO "enabling scx200 high-res timer (%s MHz +%d ppm)\n",
-		mhz27 ? "27":"1", ppm);
+	pr_info("enabling scx200 high-res timer (%s MHz +%d ppm)\n", mhz27 ? "27":"1", ppm);
 
 	return clocksource_register_hz(&cs_hrt, freq);
 }

commit 35aa621b5ab9d08767f7bc8d209b696df281d715
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Feb 22 11:37:29 2012 +0100

    uprobes: Update copyright notices
    
    Add Peter Zijlstra's copyright to the uprobes code; his
    contributions are not visible in the Git history, because they
    were backmerged.
    
    Also update existing copyright notices to the year 2012.
    
    Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
    Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
    Cc: Jim Keniston <jkenisto@linux.vnet.ibm.com>
    Link: http://lkml.kernel.org/n/tip-vjqxst502pc1efz7ah8cyht4@git.kernel.org
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 9c6be62787ed..f85797e1ccd4 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -17,10 +17,11 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
- * Copyright (C) IBM Corporation, 2008-2011
+ * Copyright (C) IBM Corporation, 2008-2012
  * Authors:
  *	Srikar Dronamraju
  *	Jim Keniston
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  */
 
 #include <linux/errno.h>
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 13f1b5909af4..5ce32e3ae9e9 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -15,10 +15,11 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
- * Copyright (C) IBM Corporation, 2008-2011
+ * Copyright (C) IBM Corporation, 2008-2012
  * Authors:
  *	Srikar Dronamraju
  *	Jim Keniston
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  */
 
 #include <linux/kernel.h>

commit a5f4374a9610fd7286c2164d4e680436727eff71
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Feb 22 11:01:49 2012 +0100

    uprobes: Move to kernel/events/
    
    Consolidate the uprobes code under kernel/events/, where the various
    core kernel event handling routines live.
    
    Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
    Cc: Jim Keniston <jkenisto@us.ibm.com>
    Cc: Oleg Nesterov <oleg@redhat.com>
    Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
    Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
    Cc: Anton Arapov <anton@redhat.com>
    Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
    Link: http://lkml.kernel.org/n/tip-biuyhhwohxgbp2vzbap5yr8o@git.kernel.org
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/arch/Kconfig b/arch/Kconfig
index cca5b545d806..d0e37c9d5f6b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -67,7 +67,7 @@ config OPTPROBES
 
 config UPROBES
 	bool "Transparent user-space probes (EXPERIMENTAL)"
-	depends on ARCH_SUPPORTS_UPROBES
+	depends on ARCH_SUPPORTS_UPROBES && PERF_EVENTS
 	default n
 	help
 	  Uprobes is the user-space counterpart to kprobes: they
diff --git a/kernel/Makefile b/kernel/Makefile
index 8609dd3d875a..2d9de86b7e76 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -107,7 +107,6 @@ obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
 obj-$(CONFIG_PADATA) += padata.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_JUMP_LABEL) += jump_label.o
-obj-$(CONFIG_UPROBES) += uprobes.o
 
 $(obj)/configs.o: $(obj)/config_data.h
 
diff --git a/kernel/events/Makefile b/kernel/events/Makefile
index 22d901f9caf4..103f5d147b2f 100644
--- a/kernel/events/Makefile
+++ b/kernel/events/Makefile
@@ -3,4 +3,7 @@ CFLAGS_REMOVE_core.o = -pg
 endif
 
 obj-y := core.o ring_buffer.o callchain.o
+
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_UPROBES) += uprobes.o
+
diff --git a/kernel/uprobes.c b/kernel/events/uprobes.c
similarity index 100%
rename from kernel/uprobes.c
rename to kernel/events/uprobes.c

commit 034d150a44a2b428e273e69889397c01f63eaf14
Merge: 09bda4432a8a 6b1bee9035d4
Author: Ingo Molnar <mingo@elte.hu>
Date:   Wed Feb 22 10:36:56 2012 +0100

    Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
    
    Small fixes; also includes an important fix from Stephane for
    system-wide monitoring, a problem introduced recently in
    perf/core by the pid list patches.
    
    Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit 09bda4432a8a4d4db2b2b94697abc8d732a9ff73
Merge: d1e169da9e20 47b0edcb599e
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Feb 17 12:55:07 2012 +0100

    Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core

commit 7b2d81d48a2d8e37efb6ce7b4d5ef58822b30d89
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Feb 17 09:27:41 2012 +0100

    uprobes/core: Clean up, refactor and improve the code
    
    Make the uprobes code readable to me:
    
     - improve the Kconfig text so that a mere mortal gets some idea
       what CONFIG_UPROBES=y is really about
    
     - do trivial renames to standardize around the uprobes_*() namespace
    
     - clean up and simplify various code flow details
    
     - separate basic blocks of functionality
    
     - remove line-break artifacts and stray whitespace
    
     - use standard local variable definition blocks
    
     - use vertical spacing to make things more readable
    
     - remove unnecessary volatile
    
     - restructure comment blocks to make them more uniform and
       more readable in general
    
    Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
    Cc: Jim Keniston <jkenisto@us.ibm.com>
    Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Cc: Oleg Nesterov <oleg@redhat.com>
    Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
    Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
    Cc: Anton Arapov <anton@redhat.com>
    Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
    Link: http://lkml.kernel.org/n/tip-ewbwhb8o6navvllsauu7k07p@git.kernel.org
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/arch/Kconfig b/arch/Kconfig
index 284f5898f526..cca5b545d806 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -66,13 +66,19 @@ config OPTPROBES
 	depends on !PREEMPT
 
 config UPROBES
-	bool "User-space probes (EXPERIMENTAL)"
+	bool "Transparent user-space probes (EXPERIMENTAL)"
 	depends on ARCH_SUPPORTS_UPROBES
 	default n
 	help
-	  Uprobes enables kernel subsystems to establish probepoints
-	  in user applications and execute handler functions when
-	  the probepoints are hit.
+	  Uprobes is the user-space counterpart to kprobes: they
+	  enable instrumentation applications (such as 'perf probe')
+	  to establish unintrusive probes in user-space binaries and
+	  libraries, by executing handler functions when the probes
+	  are hit by user-space applications.
+
+	  ( These probes come in the form of single-byte breakpoints,
+	    managed by the kernel and kept transparent to the probed
+	    application. )
 
 	  If in doubt, say "N".
 
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 8208234391ff..072df3902636 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_UPROBES_H
 #define _ASM_UPROBES_H
 /*
- * Userspace Probes (UProbes) for x86
+ * User-space Probes (UProbes) for x86
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -24,19 +24,20 @@
  */
 
 typedef u8 uprobe_opcode_t;
-#define MAX_UINSN_BYTES 16
-#define UPROBES_XOL_SLOT_BYTES	128	/* to keep it cache aligned */
 
-#define UPROBES_BKPT_INSN 0xcc
-#define UPROBES_BKPT_INSN_SIZE 1
+#define MAX_UINSN_BYTES			  16
+#define UPROBES_XOL_SLOT_BYTES		 128	/* to keep it cache aligned */
+
+#define UPROBES_BKPT_INSN		0xcc
+#define UPROBES_BKPT_INSN_SIZE		   1
 
 struct uprobe_arch_info {
-	u16			fixups;
+	u16				fixups;
 #ifdef CONFIG_X86_64
-	unsigned long rip_rela_target_address;
+	unsigned long			rip_rela_target_address;
 #endif
 };
 
 struct uprobe;
-extern int analyze_insn(struct mm_struct *mm, struct uprobe *uprobe);
+extern int arch_uprobes_analyze_insn(struct mm_struct *mm, struct uprobe *uprobe);
 #endif	/* _ASM_UPROBES_H */
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 2a301bb91bdb..cf2a18498425 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -1,5 +1,5 @@
 /*
- * Userspace Probes (UProbes) for x86
+ * User-space Probes (UProbes) for x86
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -20,7 +20,6 @@
  *	Srikar Dronamraju
  *	Jim Keniston
  */
-
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/ptrace.h>
@@ -42,10 +41,10 @@
 #define UPROBES_FIX_RIP_CX	0x4000
 
 /* Adaptations for mhiramat x86 decoder v14. */
-#define OPCODE1(insn) ((insn)->opcode.bytes[0])
-#define OPCODE2(insn) ((insn)->opcode.bytes[1])
-#define OPCODE3(insn) ((insn)->opcode.bytes[2])
-#define MODRM_REG(insn) X86_MODRM_REG(insn->modrm.value)
+#define OPCODE1(insn)		((insn)->opcode.bytes[0])
+#define OPCODE2(insn)		((insn)->opcode.bytes[1])
+#define OPCODE3(insn)		((insn)->opcode.bytes[2])
+#define MODRM_REG(insn)		X86_MODRM_REG(insn->modrm.value)
 
 #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
 	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
@@ -55,7 +54,7 @@
 	 << (row % 32))
 
 #ifdef CONFIG_X86_64
-static volatile u32 good_insns_64[256 / 32] = {
+static u32 good_insns_64[256 / 32] = {
 	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 	/*      ----------------------------------------------         */
 	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */
@@ -81,7 +80,7 @@ static volatile u32 good_insns_64[256 / 32] = {
 
 /* Good-instruction tables for 32-bit apps */
 
-static volatile u32 good_insns_32[256 / 32] = {
+static u32 good_insns_32[256 / 32] = {
 	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 	/*      ----------------------------------------------         */
 	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */
@@ -105,7 +104,7 @@ static volatile u32 good_insns_32[256 / 32] = {
 };
 
 /* Using this for both 64-bit and 32-bit apps */
-static volatile u32 good_2byte_insns[256 / 32] = {
+static u32 good_2byte_insns[256 / 32] = {
 	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 	/*      ----------------------------------------------         */
 	W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
@@ -132,42 +131,47 @@ static volatile u32 good_2byte_insns[256 / 32] = {
 
 /*
  * opcodes we'll probably never support:
- * 6c-6d, e4-e5, ec-ed - in
- * 6e-6f, e6-e7, ee-ef - out
- * cc, cd - int3, int
- * cf - iret
- * d6 - illegal instruction
- * f1 - int1/icebp
- * f4 - hlt
- * fa, fb - cli, sti
- * 0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
+ *
+ *  6c-6d, e4-e5, ec-ed - in
+ *  6e-6f, e6-e7, ee-ef - out
+ *  cc, cd - int3, int
+ *  cf - iret
+ *  d6 - illegal instruction
+ *  f1 - int1/icebp
+ *  f4 - hlt
+ *  fa, fb - cli, sti
+ *  0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
  *
  * invalid opcodes in 64-bit mode:
- * 06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
  *
- * 63 - we support this opcode in x86_64 but not in i386.
+ *  06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
+ *  63 - we support this opcode in x86_64 but not in i386.
  *
  * opcodes we may need to refine support for:
- * 0f - 2-byte instructions: For many of these instructions, the validity
- * depends on the prefix and/or the reg field.  On such instructions, we
- * just consider the opcode combination valid if it corresponds to any
- * valid instruction.
- * 8f - Group 1 - only reg = 0 is OK
- * c6-c7 - Group 11 - only reg = 0 is OK
- * d9-df - fpu insns with some illegal encodings
- * f2, f3 - repnz, repz prefixes.  These are also the first byte for
- * certain floating-point instructions, such as addsd.
- * fe - Group 4 - only reg = 0 or 1 is OK
- * ff - Group 5 - only reg = 0-6 is OK
+ *
+ *  0f - 2-byte instructions: For many of these instructions, the validity
+ *  depends on the prefix and/or the reg field.  On such instructions, we
+ *  just consider the opcode combination valid if it corresponds to any
+ *  valid instruction.
+ *
+ *  8f - Group 1 - only reg = 0 is OK
+ *  c6-c7 - Group 11 - only reg = 0 is OK
+ *  d9-df - fpu insns with some illegal encodings
+ *  f2, f3 - repnz, repz prefixes.  These are also the first byte for
+ *  certain floating-point instructions, such as addsd.
+ *
+ *  fe - Group 4 - only reg = 0 or 1 is OK
+ *  ff - Group 5 - only reg = 0-6 is OK
  *
  * others -- Do we need to support these?
- * 0f - (floating-point?) prefetch instructions
- * 07, 17, 1f - pop es, pop ss, pop ds
- * 26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
+ *
+ *  0f - (floating-point?) prefetch instructions
+ *  07, 17, 1f - pop es, pop ss, pop ds
+ *  26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
  *	but 64 and 65 (fs: and gs:) seem to be used, so we support them
- * 67 - addr16 prefix
- * ce - into
- * f0 - lock prefix
+ *  67 - addr16 prefix
+ *  ce - into
+ *  f0 - lock prefix
  */
 
 /*
@@ -182,11 +186,11 @@ static bool is_prefix_bad(struct insn *insn)
 
 	for (i = 0; i < insn->prefixes.nbytes; i++) {
 		switch (insn->prefixes.bytes[i]) {
-		case 0x26:	/*INAT_PFX_ES   */
-		case 0x2E:	/*INAT_PFX_CS   */
-		case 0x36:	/*INAT_PFX_DS   */
-		case 0x3E:	/*INAT_PFX_SS   */
-		case 0xF0:	/*INAT_PFX_LOCK */
+		case 0x26:	/* INAT_PFX_ES   */
+		case 0x2E:	/* INAT_PFX_CS   */
+		case 0x36:	/* INAT_PFX_DS   */
+		case 0x3E:	/* INAT_PFX_SS   */
+		case 0xF0:	/* INAT_PFX_LOCK */
 			return true;
 		}
 	}
@@ -201,12 +205,15 @@ static int validate_insn_32bits(struct uprobe *uprobe, struct insn *insn)
 	insn_get_opcode(insn);
 	if (is_prefix_bad(insn))
 		return -ENOTSUPP;
+
 	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_32))
 		return 0;
+
 	if (insn->opcode.nbytes == 2) {
 		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
 			return 0;
 	}
+
 	return -ENOTSUPP;
 }
 
@@ -282,12 +289,12 @@ static void prepare_fixups(struct uprobe *uprobe, struct insn *insn)
  * disastrous.
  *
  * Some useful facts about rip-relative instructions:
- * - There's always a modrm byte.
- * - There's never a SIB byte.
- * - The displacement is always 4 bytes.
+ *
+ *  - There's always a modrm byte.
+ *  - There's never a SIB byte.
+ *  - The displacement is always 4 bytes.
  */
-static void handle_riprel_insn(struct mm_struct *mm, struct uprobe *uprobe,
-							struct insn *insn)
+static void handle_riprel_insn(struct mm_struct *mm, struct uprobe *uprobe, struct insn *insn)
 {
 	u8 *cursor;
 	u8 reg;
@@ -342,13 +349,12 @@ static void handle_riprel_insn(struct mm_struct *mm, struct uprobe *uprobe,
 	}
 
 	/* Target address = address of next instruction + (signed) offset */
-	uprobe->arch_info.rip_rela_target_address = (long)insn->length
-					+ insn->displacement.value;
+	uprobe->arch_info.rip_rela_target_address = (long)insn->length + insn->displacement.value;
+
 	/* Displacement field is gone; slide immediate field (if any) over. */
 	if (insn->immediate.nbytes) {
 		cursor++;
-		memmove(cursor, cursor + insn->displacement.nbytes,
-						insn->immediate.nbytes);
+		memmove(cursor, cursor + insn->displacement.nbytes, insn->immediate.nbytes);
 	}
 	return;
 }
@@ -361,8 +367,10 @@ static int validate_insn_64bits(struct uprobe *uprobe, struct insn *insn)
 	insn_get_opcode(insn);
 	if (is_prefix_bad(insn))
 		return -ENOTSUPP;
+
 	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_64))
 		return 0;
+
 	if (insn->opcode.nbytes == 2) {
 		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
 			return 0;
@@ -370,34 +378,31 @@ static int validate_insn_64bits(struct uprobe *uprobe, struct insn *insn)
 	return -ENOTSUPP;
 }
 
-static int validate_insn_bits(struct mm_struct *mm, struct uprobe *uprobe,
-				struct insn *insn)
+static int validate_insn_bits(struct mm_struct *mm, struct uprobe *uprobe, struct insn *insn)
 {
 	if (mm->context.ia32_compat)
 		return validate_insn_32bits(uprobe, insn);
 	return validate_insn_64bits(uprobe, insn);
 }
-#else
-static void handle_riprel_insn(struct mm_struct *mm, struct uprobe *uprobe,
-							struct insn *insn)
+#else /* 32-bit: */
+static void handle_riprel_insn(struct mm_struct *mm, struct uprobe *uprobe, struct insn *insn)
 {
-	return;
+	/* No RIP-relative addressing on 32-bit */
 }
 
-static int validate_insn_bits(struct mm_struct *mm, struct uprobe *uprobe,
-				struct insn *insn)
+static int validate_insn_bits(struct mm_struct *mm, struct uprobe *uprobe, struct insn *insn)
 {
 	return validate_insn_32bits(uprobe, insn);
 }
 #endif /* CONFIG_X86_64 */
 
 /**
- * analyze_insn - instruction analysis including validity and fixups.
+ * arch_uprobes_analyze_insn - instruction analysis including validity and fixups.
  * @mm: the probed address space.
  * @uprobe: the probepoint information.
  * Return 0 on success or a -ve number on error.
  */
-int analyze_insn(struct mm_struct *mm, struct uprobe *uprobe)
+int arch_uprobes_analyze_insn(struct mm_struct *mm, struct uprobe *uprobe)
 {
 	int ret;
 	struct insn insn;
@@ -406,7 +411,9 @@ int analyze_insn(struct mm_struct *mm, struct uprobe *uprobe)
 	ret = validate_insn_bits(mm, uprobe, &insn);
 	if (ret != 0)
 		return ret;
+
 	handle_riprel_insn(mm, uprobe, &insn);
 	prepare_fixups(uprobe, &insn);
+
 	return 0;
 }
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index f1d13fd140f2..64e45f116b2a 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -1,7 +1,7 @@
 #ifndef _LINUX_UPROBES_H
 #define _LINUX_UPROBES_H
 /*
- * Userspace Probes (UProbes)
+ * User-space Probes (UProbes)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -40,8 +40,10 @@ struct uprobe_arch_info {};
 #define uprobe_opcode_sz sizeof(uprobe_opcode_t)
 
 /* flags that denote/change uprobes behaviour */
+
 /* Have a copy of original instruction */
 #define UPROBES_COPY_INSN	0x1
+
 /* Dont run handlers when first register/ last unregister in progress*/
 #define UPROBES_RUN_HANDLER	0x2
 
@@ -70,27 +72,23 @@ struct uprobe {
 };
 
 #ifdef CONFIG_UPROBES
-extern int __weak set_bkpt(struct mm_struct *mm, struct uprobe *uprobe,
-							unsigned long vaddr);
-extern int __weak set_orig_insn(struct mm_struct *mm, struct uprobe *uprobe,
-					unsigned long vaddr, bool verify);
+extern int __weak set_bkpt(struct mm_struct *mm, struct uprobe *uprobe, unsigned long vaddr);
+extern int __weak set_orig_insn(struct mm_struct *mm, struct uprobe *uprobe, unsigned long vaddr, bool verify);
 extern bool __weak is_bkpt_insn(uprobe_opcode_t *insn);
-extern int register_uprobe(struct inode *inode, loff_t offset,
-				struct uprobe_consumer *consumer);
-extern void unregister_uprobe(struct inode *inode, loff_t offset,
-				struct uprobe_consumer *consumer);
-extern int mmap_uprobe(struct vm_area_struct *vma);
+extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer);
+extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer);
+extern int uprobe_mmap(struct vm_area_struct *vma);
 #else /* CONFIG_UPROBES is not defined */
-static inline int register_uprobe(struct inode *inode, loff_t offset,
-				struct uprobe_consumer *consumer)
+static inline int
+uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
 {
 	return -ENOSYS;
 }
-static inline void unregister_uprobe(struct inode *inode, loff_t offset,
-				struct uprobe_consumer *consumer)
+static inline void
+uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
 {
 }
-static inline int mmap_uprobe(struct vm_area_struct *vma)
+static inline int uprobe_mmap(struct vm_area_struct *vma)
 {
 	return 0;
 }
diff --git a/kernel/uprobes.c b/kernel/uprobes.c
index 72e8bb3b52cd..884817f1b0d3 100644
--- a/kernel/uprobes.c
+++ b/kernel/uprobes.c
@@ -1,5 +1,5 @@
 /*
- * Userspace Probes (UProbes)
+ * User-space Probes (UProbes)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -29,24 +29,26 @@
 #include <linux/rmap.h>		/* anon_vma_prepare */
 #include <linux/mmu_notifier.h>	/* set_pte_at_notify */
 #include <linux/swap.h>		/* try_to_free_swap */
+
 #include <linux/uprobes.h>
 
 static struct rb_root uprobes_tree = RB_ROOT;
+
 static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
 
 #define UPROBES_HASH_SZ	13
+
 /* serialize (un)register */
 static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
-#define uprobes_hash(v)	(&uprobes_mutex[((unsigned long)(v)) %\
-						UPROBES_HASH_SZ])
+
+#define uprobes_hash(v)		(&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
 
 /* serialize uprobe->pending_list */
 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
-#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) %\
-						UPROBES_HASH_SZ])
+#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
 
 /*
- * uprobe_events allows us to skip the mmap_uprobe if there are no uprobe
+ * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
  * events active at this time.  Probably a fine grained per inode count is
  * better?
  */
@@ -58,9 +60,9 @@ static atomic_t uprobe_events = ATOMIC_INIT(0);
  * vm_area_struct wasnt recommended.
  */
 struct vma_info {
-	struct list_head probe_list;
-	struct mm_struct *mm;
-	loff_t vaddr;
+	struct list_head	probe_list;
+	struct mm_struct	*mm;
+	loff_t			vaddr;
 };
 
 /*
@@ -79,8 +81,7 @@ static bool valid_vma(struct vm_area_struct *vma, bool is_register)
 	if (!is_register)
 		return true;
 
-	if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) ==
-						(VM_READ|VM_EXEC))
+	if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) == (VM_READ|VM_EXEC))
 		return true;
 
 	return false;
@@ -92,6 +93,7 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
 
 	vaddr = vma->vm_start + offset;
 	vaddr -= vma->vm_pgoff << PAGE_SHIFT;
+
 	return vaddr;
 }
 
@@ -105,8 +107,7 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
  *
  * Returns 0 on success, -EFAULT on failure.
  */
-static int __replace_page(struct vm_area_struct *vma, struct page *page,
-					struct page *kpage)
+static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
@@ -163,7 +164,7 @@ static int __replace_page(struct vm_area_struct *vma, struct page *page,
  */
 bool __weak is_bkpt_insn(uprobe_opcode_t *insn)
 {
-	return (*insn == UPROBES_BKPT_INSN);
+	return *insn == UPROBES_BKPT_INSN;
 }
 
 /*
@@ -203,6 +204,7 @@ static int write_opcode(struct mm_struct *mm, struct uprobe *uprobe,
 	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
 	if (ret <= 0)
 		return ret;
+
 	ret = -EINVAL;
 
 	/*
@@ -239,6 +241,7 @@ static int write_opcode(struct mm_struct *mm, struct uprobe *uprobe,
 	vaddr_new = kmap_atomic(new_page);
 
 	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
+
 	/* poke the new insn in, ASSUMES we don't cross page boundary */
 	vaddr &= ~PAGE_MASK;
 	BUG_ON(vaddr + uprobe_opcode_sz > PAGE_SIZE);
@@ -260,7 +263,8 @@ static int write_opcode(struct mm_struct *mm, struct uprobe *uprobe,
 	page_cache_release(new_page);
 
 put_out:
-	put_page(old_page);	/* we did a get_page in the beginning */
+	put_page(old_page);
+
 	return ret;
 }
 
@@ -276,8 +280,7 @@ static int write_opcode(struct mm_struct *mm, struct uprobe *uprobe,
  * For mm @mm, read the opcode at @vaddr and store it in @opcode.
  * Return 0 (success) or a negative errno.
  */
-static int read_opcode(struct mm_struct *mm, unsigned long vaddr,
-						uprobe_opcode_t *opcode)
+static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
 {
 	struct page *page;
 	void *vaddr_new;
@@ -293,15 +296,18 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr,
 	memcpy(opcode, vaddr_new + vaddr, uprobe_opcode_sz);
 	kunmap_atomic(vaddr_new);
 	unlock_page(page);
-	put_page(page);		/* we did a get_user_pages in the beginning */
+
+	put_page(page);
+
 	return 0;
 }
 
 static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
 {
 	uprobe_opcode_t opcode;
-	int result = read_opcode(mm, vaddr, &opcode);
+	int result;
 
+	result = read_opcode(mm, vaddr, &opcode);
 	if (result)
 		return result;
 
@@ -320,11 +326,11 @@ static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
  * For mm @mm, store the breakpoint instruction at @vaddr.
  * Return 0 (success) or a negative errno.
  */
-int __weak set_bkpt(struct mm_struct *mm, struct uprobe *uprobe,
-						unsigned long vaddr)
+int __weak set_bkpt(struct mm_struct *mm, struct uprobe *uprobe, unsigned long vaddr)
 {
-	int result = is_bkpt_at_addr(mm, vaddr);
+	int result;
 
+	result = is_bkpt_at_addr(mm, vaddr);
 	if (result == 1)
 		return -EEXIST;
 
@@ -344,35 +350,35 @@ int __weak set_bkpt(struct mm_struct *mm, struct uprobe *uprobe,
  * For mm @mm, restore the original opcode (opcode) at @vaddr.
  * Return 0 (success) or a negative errno.
  */
-int __weak set_orig_insn(struct mm_struct *mm, struct uprobe *uprobe,
-					unsigned long vaddr, bool verify)
+int __weak
+set_orig_insn(struct mm_struct *mm, struct uprobe *uprobe, unsigned long vaddr, bool verify)
 {
 	if (verify) {
-		int result = is_bkpt_at_addr(mm, vaddr);
+		int result;
 
+		result = is_bkpt_at_addr(mm, vaddr);
 		if (!result)
 			return -EINVAL;
 
 		if (result != 1)
 			return result;
 	}
-	return write_opcode(mm, uprobe, vaddr,
-				*(uprobe_opcode_t *)uprobe->insn);
+	return write_opcode(mm, uprobe, vaddr, *(uprobe_opcode_t *)uprobe->insn);
 }
 
 static int match_uprobe(struct uprobe *l, struct uprobe *r)
 {
 	if (l->inode < r->inode)
 		return -1;
+
 	if (l->inode > r->inode)
 		return 1;
-	else {
-		if (l->offset < r->offset)
-			return -1;
 
-		if (l->offset > r->offset)
-			return 1;
-	}
+	if (l->offset < r->offset)
+		return -1;
+
+	if (l->offset > r->offset)
+		return 1;
 
 	return 0;
 }
@@ -391,6 +397,7 @@ static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
 			atomic_inc(&uprobe->ref);
 			return uprobe;
 		}
+
 		if (match < 0)
 			n = n->rb_left;
 		else
@@ -411,6 +418,7 @@ static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
 	spin_lock_irqsave(&uprobes_treelock, flags);
 	uprobe = __find_uprobe(inode, offset);
 	spin_unlock_irqrestore(&uprobes_treelock, flags);
+
 	return uprobe;
 }
 
@@ -436,16 +444,18 @@ static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
 			p = &parent->rb_right;
 
 	}
+
 	u = NULL;
 	rb_link_node(&uprobe->rb_node, parent, p);
 	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
 	/* get access + creation ref */
 	atomic_set(&uprobe->ref, 2);
+
 	return u;
 }
 
 /*
- * Acquires uprobes_treelock.
+ * Acquire uprobes_treelock.
  * Matching uprobe already exists in rbtree;
  *	increment (access refcount) and return the matching uprobe.
  *
@@ -460,6 +470,7 @@ static struct uprobe *insert_uprobe(struct uprobe *uprobe)
 	spin_lock_irqsave(&uprobes_treelock, flags);
 	u = __insert_uprobe(uprobe);
 	spin_unlock_irqrestore(&uprobes_treelock, flags);
+
 	return u;
 }
 
@@ -490,19 +501,22 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
 		kfree(uprobe);
 		uprobe = cur_uprobe;
 		iput(inode);
-	} else
+	} else {
 		atomic_inc(&uprobe_events);
+	}
+
 	return uprobe;
 }
 
 /* Returns the previous consumer */
-static struct uprobe_consumer *add_consumer(struct uprobe *uprobe,
-				struct uprobe_consumer *consumer)
+static struct uprobe_consumer *
+consumer_add(struct uprobe *uprobe, struct uprobe_consumer *consumer)
 {
 	down_write(&uprobe->consumer_rwsem);
 	consumer->next = uprobe->consumers;
 	uprobe->consumers = consumer;
 	up_write(&uprobe->consumer_rwsem);
+
 	return consumer->next;
 }
 
@@ -511,8 +525,7 @@ static struct uprobe_consumer *add_consumer(struct uprobe *uprobe,
  * Return true if the @consumer is deleted successfully
  * or return false.
  */
-static bool del_consumer(struct uprobe *uprobe,
-				struct uprobe_consumer *consumer)
+static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *consumer)
 {
 	struct uprobe_consumer **con;
 	bool ret = false;
@@ -526,6 +539,7 @@ static bool del_consumer(struct uprobe *uprobe,
 		}
 	}
 	up_write(&uprobe->consumer_rwsem);
+
 	return ret;
 }
 
@@ -557,15 +571,15 @@ static int __copy_insn(struct address_space *mapping,
 	memcpy(insn, vaddr + off1, nbytes);
 	kunmap_atomic(vaddr);
 	page_cache_release(page);
+
 	return 0;
 }
 
-static int copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma,
-					unsigned long addr)
+static int copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
 {
 	struct address_space *mapping;
-	int bytes;
 	unsigned long nbytes;
+	int bytes;
 
 	addr &= ~PAGE_MASK;
 	nbytes = PAGE_SIZE - addr;
@@ -605,6 +619,7 @@ static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
 		return -EEXIST;
 
 	addr = (unsigned long)vaddr;
+
 	if (!(uprobe->flags & UPROBES_COPY_INSN)) {
 		ret = copy_insn(uprobe, vma, addr);
 		if (ret)
@@ -613,7 +628,7 @@ static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
 		if (is_bkpt_insn((uprobe_opcode_t *)uprobe->insn))
 			return -EEXIST;
 
-		ret = analyze_insn(mm, uprobe);
+		ret = arch_uprobes_analyze_insn(mm, uprobe);
 		if (ret)
 			return ret;
 
@@ -624,8 +639,7 @@ static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
 	return ret;
 }
 
-static void remove_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
-							loff_t vaddr)
+static void remove_breakpoint(struct mm_struct *mm, struct uprobe *uprobe, loff_t vaddr)
 {
 	set_orig_insn(mm, uprobe, (unsigned long)vaddr, true);
 }
@@ -649,9 +663,11 @@ static struct vma_info *__find_next_vma_info(struct list_head *head,
 	struct prio_tree_iter iter;
 	struct vm_area_struct *vma;
 	struct vma_info *tmpvi;
-	loff_t vaddr;
-	unsigned long pgoff = offset >> PAGE_SHIFT;
+	unsigned long pgoff;
 	int existing_vma;
+	loff_t vaddr;
+
+	pgoff = offset >> PAGE_SHIFT;
 
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		if (!valid_vma(vma, is_register))
@@ -659,6 +675,7 @@ static struct vma_info *__find_next_vma_info(struct list_head *head,
 
 		existing_vma = 0;
 		vaddr = vma_address(vma, offset);
+
 		list_for_each_entry(tmpvi, head, probe_list) {
 			if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
 				existing_vma = 1;
@@ -670,14 +687,15 @@ static struct vma_info *__find_next_vma_info(struct list_head *head,
 		 * Another vma needs a probe to be installed. However skip
 		 * installing the probe if the vma is about to be unlinked.
 		 */
-		if (!existing_vma &&
-				atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
+		if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
 			vi->mm = vma->vm_mm;
 			vi->vaddr = vaddr;
 			list_add(&vi->probe_list, head);
+
 			return vi;
 		}
 	}
+
 	return NULL;
 }
 
@@ -685,11 +703,12 @@ static struct vma_info *__find_next_vma_info(struct list_head *head,
  * Iterate in the rmap prio tree  and find a vma where a probe has not
  * yet been inserted.
  */
-static struct vma_info *find_next_vma_info(struct list_head *head,
-			loff_t offset, struct address_space *mapping,
-			bool is_register)
+static struct vma_info *
+find_next_vma_info(struct list_head *head, loff_t offset, struct address_space *mapping,
+		   bool is_register)
 {
 	struct vma_info *vi, *retvi;
+
 	vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
 	if (!vi)
 		return ERR_PTR(-ENOMEM);
@@ -700,6 +719,7 @@ static struct vma_info *find_next_vma_info(struct list_head *head,
 
 	if (!retvi)
 		kfree(vi);
+
 	return retvi;
 }
 
@@ -711,16 +731,23 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
 	struct vma_info *vi, *tmpvi;
 	struct mm_struct *mm;
 	loff_t vaddr;
-	int ret = 0;
+	int ret;
 
 	mapping = uprobe->inode->i_mapping;
 	INIT_LIST_HEAD(&try_list);
-	while ((vi = find_next_vma_info(&try_list, uprobe->offset,
-					mapping, is_register)) != NULL) {
+
+	ret = 0;
+
+	for (;;) {
+		vi = find_next_vma_info(&try_list, uprobe->offset, mapping, is_register);
+		if (!vi)
+			break;
+
 		if (IS_ERR(vi)) {
 			ret = PTR_ERR(vi);
 			break;
 		}
+
 		mm = vi->mm;
 		down_read(&mm->mmap_sem);
 		vma = find_vma(mm, (unsigned long)vi->vaddr);
@@ -755,19 +782,21 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
 				break;
 		}
 	}
+
 	list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
 		list_del(&vi->probe_list);
 		kfree(vi);
 	}
+
 	return ret;
 }
 
-static int __register_uprobe(struct uprobe *uprobe)
+static int __uprobe_register(struct uprobe *uprobe)
 {
 	return register_for_each_vma(uprobe, true);
 }
 
-static void __unregister_uprobe(struct uprobe *uprobe)
+static void __uprobe_unregister(struct uprobe *uprobe)
 {
 	if (!register_for_each_vma(uprobe, false))
 		delete_uprobe(uprobe);
@@ -776,15 +805,15 @@ static void __unregister_uprobe(struct uprobe *uprobe)
 }
 
 /*
- * register_uprobe - register a probe
+ * uprobe_register - register a probe
  * @inode: the file in which the probe has to be placed.
  * @offset: offset from the start of the file.
  * @consumer: information on howto handle the probe..
  *
- * Apart from the access refcount, register_uprobe() takes a creation
+ * Apart from the access refcount, uprobe_register() takes a creation
  * refcount (thro alloc_uprobe) if and only if this @uprobe is getting
  * inserted into the rbtree (i.e first consumer for a @inode:@offset
- * tuple).  Creation refcount stops unregister_uprobe from freeing the
+ * tuple).  Creation refcount stops uprobe_unregister from freeing the
  * @uprobe even before the register operation is complete. Creation
  * refcount is released when the last @consumer for the @uprobe
  * unregisters.
@@ -792,28 +821,29 @@ static void __unregister_uprobe(struct uprobe *uprobe)
  * Return errno if it cannot successully install probes
  * else return 0 (success)
  */
-int register_uprobe(struct inode *inode, loff_t offset,
-				struct uprobe_consumer *consumer)
+int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
 {
 	struct uprobe *uprobe;
-	int ret = -EINVAL;
+	int ret;
 
 	if (!inode || !consumer || consumer->next)
-		return ret;
+		return -EINVAL;
 
 	if (offset > i_size_read(inode))
-		return ret;
+		return -EINVAL;
 
 	ret = 0;
 	mutex_lock(uprobes_hash(inode));
 	uprobe = alloc_uprobe(inode, offset);
-	if (uprobe && !add_consumer(uprobe, consumer)) {
-		ret = __register_uprobe(uprobe);
+
+	if (uprobe && !consumer_add(uprobe, consumer)) {
+		ret = __uprobe_register(uprobe);
 		if (ret) {
 			uprobe->consumers = NULL;
-			__unregister_uprobe(uprobe);
-		} else
+			__uprobe_unregister(uprobe);
+		} else {
 			uprobe->flags |= UPROBES_RUN_HANDLER;
+		}
 	}
 
 	mutex_unlock(uprobes_hash(inode));
@@ -823,15 +853,14 @@ int register_uprobe(struct inode *inode, loff_t offset,
 }
 
 /*
- * unregister_uprobe - unregister a already registered probe.
+ * uprobe_unregister - unregister a already registered probe.
  * @inode: the file in which the probe has to be removed.
  * @offset: offset from the start of the file.
  * @consumer: identify which probe if multiple probes are colocated.
  */
-void unregister_uprobe(struct inode *inode, loff_t offset,
-				struct uprobe_consumer *consumer)
+void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
 {
-	struct uprobe *uprobe = NULL;
+	struct uprobe *uprobe;
 
 	if (!inode || !consumer)
 		return;
@@ -841,15 +870,14 @@ void unregister_uprobe(struct inode *inode, loff_t offset,
 		return;
 
 	mutex_lock(uprobes_hash(inode));
-	if (!del_consumer(uprobe, consumer))
-		goto unreg_out;
 
-	if (!uprobe->consumers) {
-		__unregister_uprobe(uprobe);
-		uprobe->flags &= ~UPROBES_RUN_HANDLER;
+	if (consumer_del(uprobe, consumer)) {
+		if (!uprobe->consumers) {
+			__uprobe_unregister(uprobe);
+			uprobe->flags &= ~UPROBES_RUN_HANDLER;
+		}
 	}
 
-unreg_out:
 	mutex_unlock(uprobes_hash(inode));
 	if (uprobe)
 		put_uprobe(uprobe);
@@ -870,6 +898,7 @@ static struct rb_node *find_least_offset_node(struct inode *inode)
 	while (n) {
 		uprobe = rb_entry(n, struct uprobe, rb_node);
 		match = match_uprobe(&u, uprobe);
+
 		if (uprobe->inode == inode)
 			close_node = n;
 
@@ -881,6 +910,7 @@ static struct rb_node *find_least_offset_node(struct inode *inode)
 		else
 			n = n->rb_right;
 	}
+
 	return close_node;
 }
 
@@ -890,11 +920,13 @@ static struct rb_node *find_least_offset_node(struct inode *inode)
 static void build_probe_list(struct inode *inode, struct list_head *head)
 {
 	struct uprobe *uprobe;
-	struct rb_node *n;
 	unsigned long flags;
+	struct rb_node *n;
 
 	spin_lock_irqsave(&uprobes_treelock, flags);
+
 	n = find_least_offset_node(inode);
+
 	for (; n; n = rb_next(n)) {
 		uprobe = rb_entry(n, struct uprobe, rb_node);
 		if (uprobe->inode != inode)
@@ -903,6 +935,7 @@ static void build_probe_list(struct inode *inode, struct list_head *head)
 		list_add(&uprobe->pending_list, head);
 		atomic_inc(&uprobe->ref);
 	}
+
 	spin_unlock_irqrestore(&uprobes_treelock, flags);
 }
 
@@ -912,42 +945,44 @@ static void build_probe_list(struct inode *inode, struct list_head *head)
  *
  * Return -ve no if we fail to insert probes and we cannot
  * bail-out.
- * Return 0 otherwise. i.e :
+ * Return 0 otherwise. i.e:
+ *
  *	- successful insertion of probes
  *	- (or) no possible probes to be inserted.
  *	- (or) insertion of probes failed but we can bail-out.
  */
-int mmap_uprobe(struct vm_area_struct *vma)
+int uprobe_mmap(struct vm_area_struct *vma)
 {
 	struct list_head tmp_list;
 	struct uprobe *uprobe, *u;
 	struct inode *inode;
-	int ret = 0;
+	int ret;
 
 	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
-		return ret;	/* Bail-out */
+		return 0;
 
 	inode = vma->vm_file->f_mapping->host;
 	if (!inode)
-		return ret;
+		return 0;
 
 	INIT_LIST_HEAD(&tmp_list);
 	mutex_lock(uprobes_mmap_hash(inode));
 	build_probe_list(inode, &tmp_list);
+
+	ret = 0;
+
 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
 		loff_t vaddr;
 
 		list_del(&uprobe->pending_list);
 		if (!ret) {
 			vaddr = vma_address(vma, uprobe->offset);
-			if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
-				put_uprobe(uprobe);
-				continue;
+			if (vaddr >= vma->vm_start && vaddr < vma->vm_end) {
+				ret = install_breakpoint(vma->vm_mm, uprobe, vma, vaddr);
+				/* Ignore double add: */
+				if (ret == -EEXIST)
+					ret = 0;
 			}
-			ret = install_breakpoint(vma->vm_mm, uprobe, vma,
-								vaddr);
-			if (ret == -EEXIST)
-				ret = 0;
 		}
 		put_uprobe(uprobe);
 	}
diff --git a/mm/mmap.c b/mm/mmap.c
index 1aed183636d7..5a863d328a44 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -618,10 +618,10 @@ again:			remove_next = 1 + (end > next->vm_end);
 		mutex_unlock(&mapping->i_mmap_mutex);
 
 	if (root) {
-		mmap_uprobe(vma);
+		uprobe_mmap(vma);
 
 		if (adjust_next)
-			mmap_uprobe(next);
+			uprobe_mmap(next);
 	}
 
 	if (remove_next) {
@@ -646,7 +646,7 @@ again:			remove_next = 1 + (end > next->vm_end);
 		}
 	}
 	if (insert && file)
-		mmap_uprobe(insert);
+		uprobe_mmap(insert);
 
 	validate_mm(mm);
 
@@ -1340,7 +1340,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
 		make_pages_present(addr, addr + len);
 
-	if (file && mmap_uprobe(vma))
+	if (file && uprobe_mmap(vma))
 		/* matching probes but cannot insert */
 		goto unmap_and_free_vma;
 
@@ -2301,7 +2301,7 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
 		return -ENOMEM;
 
-	if (vma->vm_file && mmap_uprobe(vma))
+	if (vma->vm_file && uprobe_mmap(vma))
 		return -EINVAL;
 
 	vma_link(mm, vma, prev, rb_link, rb_parent);
@@ -2374,7 +2374,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 			if (new_vma->vm_file) {
 				get_file(new_vma->vm_file);
 
-				if (mmap_uprobe(new_vma))
+				if (uprobe_mmap(new_vma))
 					goto out_free_mempol;
 
 				if (vma->vm_flags & VM_EXECUTABLE)

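For readers tracking the rename above, here is a minimal usage sketch of
the uprobe_register()/uprobe_unregister() interface as it stands after
this patch. Only the two prototypes and the -ENOSYS stub behaviour come
from the diff itself; the handler signature and the sample_* names are
illustrative assumptions, not part of the patch.

        #include <linux/fs.h>
        #include <linux/ptrace.h>
        #include <linux/uprobes.h>

        /* assumed handler signature; runs when the probed instruction is hit */
        static int sample_handler(struct uprobe_consumer *self, struct pt_regs *regs)
        {
                return 0;
        }

        static struct uprobe_consumer sample_consumer = {
                .handler = sample_handler,
        };

        /* place a probe at @offset in the file backing @inode */
        static int sample_probe_add(struct inode *inode, loff_t offset)
        {
                /* was register_uprobe() before this patch */
                return uprobe_register(inode, offset, &sample_consumer);
        }

        static void sample_probe_del(struct inode *inode, loff_t offset)
        {
                /* was unregister_uprobe() before this patch */
                uprobe_unregister(inode, offset, &sample_consumer);
        }

On x86 the probe itself is the single-byte 0xcc (int3) breakpoint
(UPROBES_BKPT_INSN above), written by write_opcode() and kept transparent
to the probed application; with CONFIG_UPROBES unset, the inline stubs in
include/linux/uprobes.h turn uprobe_register() into -ENOSYS, so callers
need no #ifdef guards.
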
commit d1e169da9e20efc0762da6ec160dd7740d0103f5
Merge: f8d98f109521 808e122630d4
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Feb 17 09:02:28 2012 +0100

    Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
    
    Includes smaller fixes and improvements, plus the exclude_{host,guest}
    feature test and a fallback to handle older kernels.
    
    Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

commit 7915a2e902df9fdb5ea5d5786a4172b38d0ea71e
Merge: 35f1790e6c6a 23783f817bce
Author: Ingo Molnar <mingo@elte.hu>
Date:   Tue Feb 7 11:33:48 2012 +0100

    Merge branch 'linus' into x86/boot
    
    Merge this into x86/boot so that we can queue up dependent patches.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit 52fce956a7568504a1da90c14b9bf2deaad401bd
Merge: 290436c9c61a 9dac6a29e0ce
Author: Ingo Molnar <mingo@elte.hu>
Date:   Tue Feb 7 10:00:06 2012 +0100

    Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
    
    perf/core fixes and improvements.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>