#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/config.h>
#include <linux/preempt.h>
#include <linux/smp_lock.h>
#include <asm/hardirq.h>
#include <asm/system.h>

/*
 * NOTE(review): the #if that this #endif closes (presumably the
 * CONFIG_SMP guard around synchronize_irq) was lost in extraction —
 * confirm against the full header.  This is the !SMP stub: with a
 * single CPU there is nothing to wait for, so it is just a barrier.
 */
# define synchronize_irq(irq)	barrier()
#endif
/*
 * NMI entry/exit.  nmi_enter() reuses the hard-IRQ entry accounting;
 * nmi_exit() only drops the hardirq section of the preempt count —
 * it must NOT run softirqs, so it deliberately does not go through
 * irq_exit().
 */
#define nmi_enter()		irq_enter()
#define nmi_exit()		sub_preempt_count(HARDIRQ_OFFSET)

/* Opaque forward declaration: only pointers to task_struct are used here. */
struct task_struct;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * NOTE(review): the opening lines of the !CONFIG_VIRT_CPU_ACCOUNTING
 * stub (presumably `static inline void account_system_vtime(struct
 * task_struct *tsk) {`) were lost in extraction; this brace closes
 * that empty stub.  Confirm against the full header.
 */
}
#endif
/*
 * Enter hard-IRQ context: charge the interrupted task's system time
 * and raise the hardirq section of the preempt count so in_irq()
 * becomes true.  Paired with irq_exit() (or nmi_exit() on the NMI
 * path, which skips softirq processing).
 */
#define irq_enter()					\
	do {						\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
	} while (0)
/*
 * Exit hard-IRQ context and process pending softirqs if appropriate;
 * implemented out of line (kernel/softirq.c).
 */
extern void irq_exit(void);

#endif /* LINUX_HARDIRQ_H */