Diffstat (limited to 'include/linux/nmi.h')
 include/linux/nmi.h | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+), 0 deletions(-)
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
new file mode 100644
index 00000000..db50840e
--- /dev/null
+++ b/include/linux/nmi.h
@@ -0,0 +1,56 @@
+/*
+ * linux/include/linux/nmi.h
+ */
+#ifndef LINUX_NMI_H
+#define LINUX_NMI_H
+
+#include <linux/sched.h>
+#include <asm/irq.h>
+
+/**
+ * touch_nmi_watchdog - restart NMI watchdog timeout.
+ *
+ * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
+ * may be used to reset the timeout - for code which intentionally
+ * disables interrupts for a long time. This call is stateless.
+ */
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+#include <asm/nmi.h>
+extern void touch_nmi_watchdog(void);
+#else
+static inline void touch_nmi_watchdog(void)
+{
+	touch_softlockup_watchdog();
+}
+#endif
+
+/*
+ * Create trigger_all_cpu_backtrace() out of the arch-provided
+ * base function. Return whether such support was available,
+ * to allow calling code to fall back to some other mechanism:
+ */
+#ifdef arch_trigger_all_cpu_backtrace
+static inline bool trigger_all_cpu_backtrace(void)
+{
+	arch_trigger_all_cpu_backtrace();
+
+	return true;
+}
+#else
+static inline bool trigger_all_cpu_backtrace(void)
+{
+	return false;
+}
+#endif
+
+#ifdef CONFIG_LOCKUP_DETECTOR
+int hw_nmi_is_cpu_stuck(struct pt_regs *);
+u64 hw_nmi_get_sample_period(int watchdog_thresh);
+extern int watchdog_enabled;
+extern int watchdog_thresh;
+struct ctl_table;
+extern int proc_dowatchdog(struct ctl_table *, int,
+			   void __user *, size_t *, loff_t *);
+#endif
+
+#endif
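
The touch_nmi_watchdog() kernel-doc in this header describes the intended call pattern: code that keeps interrupts disabled for an extended period should poke the watchdog on each pass so the NMI watchdog does not report a false hard lockup. A minimal sketch of such a caller, assuming a hypothetical busy-wait loop; poll_device_status() is illustrative and not part of this header:

#include <linux/nmi.h>
#include <linux/irqflags.h>

static void wait_for_device(void)
{
	unsigned long flags;

	local_irq_save(flags);
	while (!poll_device_status()) {	/* hypothetical wait condition */
		/*
		 * Interrupts stay off for a long time here, so reset the
		 * NMI watchdog timeout on every iteration; as the comment
		 * above notes, the call is stateless, and it degrades to
		 * touch_softlockup_watchdog() when no NMI watchdog is
		 * configured.
		 */
		touch_nmi_watchdog();
		cpu_relax();
	}
	local_irq_restore(flags);
}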
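
Similarly, the comment above trigger_all_cpu_backtrace() explains that the boolean return value lets callers fall back to another mechanism when the architecture provides no arch_trigger_all_cpu_backtrace(). A sketch of that pattern; falling back to dump_stack() for the current CPU only is an illustrative choice, not something this header prescribes:

#include <linux/nmi.h>
#include <linux/kernel.h>

static void report_stuck_cpus(void)
{
	/*
	 * Prefer the arch-provided all-CPU backtrace. On architectures
	 * without arch_trigger_all_cpu_backtrace(), the stub in this
	 * header returns false and we fall back to dumping only the
	 * current CPU's stack.
	 */
	if (!trigger_all_cpu_backtrace())
		dump_stack();
}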