author    | Srikant Patnaik | 2015-01-11 12:28:04 +0530
committer | Srikant Patnaik | 2015-01-11 12:28:04 +0530
commit    | 871480933a1c28f8a9fed4c4d34d06c439a7a422 (patch)
tree      | 8718f573808810c2a1e8cb8fb6ac469093ca2784 /include/linux/jump_label.h
parent    | 9d40ac5867b9aefe0722bc1f110b965ff294d30d (diff)
download  | FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.tar.gz
          | FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.tar.bz2
          | FOSSEE-netbook-kernel-source-871480933a1c28f8a9fed4c4d34d06c439a7a422.zip
Moved, renamed, and deleted files
The original directory structure was scattered and unorganized. These changes reorganize the files to match the standard kernel source tree layout.
Diffstat (limited to 'include/linux/jump_label.h')
-rw-r--r-- | include/linux/jump_label.h | 226
1 file changed, 226 insertions, 0 deletions
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
new file mode 100644
index 00000000..c513a405
--- /dev/null
+++ b/include/linux/jump_label.h
@@ -0,0 +1,226 @@
+#ifndef _LINUX_JUMP_LABEL_H
+#define _LINUX_JUMP_LABEL_H
+
+/*
+ * Jump label support
+ *
+ * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
+ * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * Jump labels provide an interface to generate dynamic branches using
+ * self-modifying code. Assuming toolchain and architecture support the result
+ * of a "if (static_key_false(&key))" statement is a unconditional branch (which
+ * defaults to false - and the true block is placed out of line).
+ *
+ * However at runtime we can change the branch target using
+ * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
+ * object and for as long as there are references all branches referring to
+ * that particular key will point to the (out of line) true block.
+ *
+ * Since this relies on modifying code the static_key_slow_{inc,dec}() functions
+ * must be considered absolute slow paths (machine wide synchronization etc.).
+ * OTOH, since the affected branches are unconditional their runtime overhead
+ * will be absolutely minimal, esp. in the default (off) case where the total
+ * effect is a single NOP of appropriate size. The on case will patch in a jump
+ * to the out-of-line block.
+ *
+ * When the control is directly exposed to userspace it is prudent to delay the
+ * decrement to avoid high frequency code modifications which can (and do)
+ * cause significant performance degradation. Struct static_key_deferred and
+ * static_key_slow_dec_deferred() provide for this.
+ *
+ * Lacking toolchain and or architecture support, it falls back to a simple
+ * conditional branch.
+ *
+ * struct static_key my_key = STATIC_KEY_INIT_TRUE;
+ *
+ * if (static_key_true(&my_key)) {
+ * }
+ *
+ * will result in the true case being in-line and starts the key with a single
+ * reference. Mixing static_key_true() and static_key_false() on the same key is not
+ * allowed.
+ *
+ * Not initializing the key (static data is initialized to 0s anyway) is the
+ * same as using STATIC_KEY_INIT_FALSE and static_key_false() is
+ * equivalent with static_branch().
+ *
+*/
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/workqueue.h>
+
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+
+struct static_key {
+	atomic_t enabled;
+/* Set lsb bit to 1 if branch is default true, 0 ot */
+	struct jump_entry *entries;
+#ifdef CONFIG_MODULES
+	struct static_key_mod *next;
+#endif
+};
+
+struct static_key_deferred {
+	struct static_key key;
+	unsigned long timeout;
+	struct delayed_work work;
+};
+
+# include <asm/jump_label.h>
+# define HAVE_JUMP_LABEL
+#endif	/* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
+
+enum jump_label_type {
+	JUMP_LABEL_DISABLE = 0,
+	JUMP_LABEL_ENABLE,
+};
+
+struct module;
+
+#ifdef HAVE_JUMP_LABEL
+
+#define JUMP_LABEL_TRUE_BRANCH 1UL
+
+static
+inline struct jump_entry *jump_label_get_entries(struct static_key *key)
+{
+	return (struct jump_entry *)((unsigned long)key->entries
+						& ~JUMP_LABEL_TRUE_BRANCH);
+}
+
+static inline bool jump_label_get_branch_default(struct static_key *key)
+{
+	if ((unsigned long)key->entries & JUMP_LABEL_TRUE_BRANCH)
+		return true;
+	return false;
+}
+
+static __always_inline bool static_key_false(struct static_key *key)
+{
+	return arch_static_branch(key);
+}
+
+static __always_inline bool static_key_true(struct static_key *key)
+{
+	return !static_key_false(key);
+}
+
+/* Deprecated. Please use 'static_key_false() instead. */
+static __always_inline bool static_branch(struct static_key *key)
+{
+	return arch_static_branch(key);
+}
+
+extern struct jump_entry __start___jump_table[];
+extern struct jump_entry __stop___jump_table[];
+
+extern void jump_label_init(void);
+extern void jump_label_lock(void);
+extern void jump_label_unlock(void);
+extern void arch_jump_label_transform(struct jump_entry *entry,
+				      enum jump_label_type type);
+extern void arch_jump_label_transform_static(struct jump_entry *entry,
+					     enum jump_label_type type);
+extern int jump_label_text_reserved(void *start, void *end);
+extern void static_key_slow_inc(struct static_key *key);
+extern void static_key_slow_dec(struct static_key *key);
+extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern void jump_label_apply_nops(struct module *mod);
+extern void
+jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+	{ .enabled = ATOMIC_INIT(1), .entries = (void *)1 })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+	{ .enabled = ATOMIC_INIT(0), .entries = (void *)0 })
+
+#else  /* !HAVE_JUMP_LABEL */
+
+#include <linux/atomic.h>
+
+struct static_key {
+	atomic_t enabled;
+};
+
+static __always_inline void jump_label_init(void)
+{
+}
+
+struct static_key_deferred {
+	struct static_key key;
+};
+
+static __always_inline bool static_key_false(struct static_key *key)
+{
+	if (unlikely(atomic_read(&key->enabled)) > 0)
+		return true;
+	return false;
+}
+
+static __always_inline bool static_key_true(struct static_key *key)
+{
+	if (likely(atomic_read(&key->enabled)) > 0)
+		return true;
+	return false;
+}
+
+/* Deprecated. Please use 'static_key_false() instead. */
+static __always_inline bool static_branch(struct static_key *key)
+{
+	if (unlikely(atomic_read(&key->enabled)) > 0)
+		return true;
+	return false;
+}
+
+static inline void static_key_slow_inc(struct static_key *key)
+{
+	atomic_inc(&key->enabled);
+}
+
+static inline void static_key_slow_dec(struct static_key *key)
+{
+	atomic_dec(&key->enabled);
+}
+
+static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
+{
+	static_key_slow_dec(&key->key);
+}
+
+static inline int jump_label_text_reserved(void *start, void *end)
+{
+	return 0;
+}
+
+static inline void jump_label_lock(void) {}
+static inline void jump_label_unlock(void) {}
+
+static inline int jump_label_apply_nops(struct module *mod)
+{
+	return 0;
+}
+
+static inline void
+jump_label_rate_limit(struct static_key_deferred *key,
+		unsigned long rl)
+{
+}
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+	{ .enabled = ATOMIC_INIT(1) })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+	{ .enabled = ATOMIC_INIT(0) })
+
+#endif	/* HAVE_JUMP_LABEL */
+
+#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#define jump_label_enabled static_key_enabled
+
+static inline bool static_key_enabled(struct static_key *key)
+{
+	return (atomic_read(&key->enabled) > 0);
+}
+
+#endif	/* _LINUX_JUMP_LABEL_H */
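As a rough illustration of the usage pattern described in the header comment above (not part of this commit), the following sketch shows a key declared with STATIC_KEY_INIT_FALSE, tested with static_key_false() on a hot path, and toggled from a slow control path with static_key_slow_inc()/static_key_slow_dec(). The names tracing_enabled_key, hot_path(), tracing_control(), and do_expensive_tracing() are hypothetical caller code; only the jump_label.h API shown in the diff is taken from the source.

```c
/* Illustrative sketch only -- hypothetical caller of the jump_label.h API above. */
#include <linux/jump_label.h>

/* Defaults to false: with toolchain support the hot-path test is a NOP until enabled. */
static struct static_key tracing_enabled_key = STATIC_KEY_INIT_FALSE;

static void hot_path(void)
{
	if (static_key_false(&tracing_enabled_key)) {
		/* Out-of-line "true" block, reached only after the key is enabled. */
		do_expensive_tracing();	/* hypothetical helper */
	}
	/* Fast path continues with no conditional-branch overhead in the default case. */
}

/* Slow control path, e.g. driven from a sysfs or debugfs write. */
static void tracing_control(bool enable)
{
	if (enable)
		static_key_slow_inc(&tracing_enabled_key);	/* patches NOP -> jump */
	else
		static_key_slow_dec(&tracing_enabled_key);	/* patches jump -> NOP */
}
```

For keys toggled directly from userspace, the header comment instead recommends struct static_key_deferred together with static_key_slow_dec_deferred() and jump_label_rate_limit(), so that high-frequency toggling does not trigger repeated code patching.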