Home > GIT Browse > vanilla
summary refs log tree commit diff
diff options
context:
space:
mode:
authorAndrew Morton <akpm@zip.com.au>2002-08-10 04:40:05 -0700
committerLinus Torvalds <torvalds@home.transmeta.com>2002-08-10 04:40:05 -0700
commit199143a8610cc639b7b9de2803fc812f99eb7951 (patch)
tree00ff41458d1721ca21e437a61481ac553c6dd68a
parent1dd4dd0c4e2ca1f088add68a89c522b93097cc48 (diff)
[PATCH] Infrastructure for atomic user accesses
Well the optimum solution there would be to create and use `inc_preempt_count_non_preempt()'. I don't see any way of embedding this in kmap_atomic() or copy_to_user_atomic() without loss of flexibility or incurring a double-inc somewhere.
-rw-r--r--arch/i386/mm/fault.c6
-rw-r--r--include/linux/preempt.h24
2 files changed, 25 insertions(+), 5 deletions(-)
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index cd4083d09658..dcfb3912e674 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -181,10 +181,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
info.si_code = SEGV_MAPERR;
/*
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
+ * If we're in an interrupt, have no user context or are running in an
+ * atomic region then we must not take the fault..
*/
- if (in_interrupt() || !mm)
+ if (preempt_count() || !mm)
goto no_context;
down_read(&mm->mmap_sem);
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 172471f0dbde..3864d46eadba 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -5,19 +5,29 @@
#define preempt_count() (current_thread_info()->preempt_count)
+#define inc_preempt_count() \
+do { \
+ preempt_count()++; \
+} while (0)
+
+#define dec_preempt_count() \
+do { \
+ preempt_count()--; \
+} while (0)
+
#ifdef CONFIG_PREEMPT
extern void preempt_schedule(void);
#define preempt_disable() \
do { \
- preempt_count()++; \
+ inc_preempt_count(); \
barrier(); \
} while (0)
#define preempt_enable_no_resched() \
do { \
- preempt_count()--; \
+ dec_preempt_count(); \
barrier(); \
} while (0)
@@ -34,6 +44,9 @@ do { \
preempt_schedule(); \
} while (0)
+#define inc_preempt_count_non_preempt() do { } while (0)
+#define dec_preempt_count_non_preempt() do { } while (0)
+
#else
#define preempt_disable() do { } while (0)
@@ -41,6 +54,13 @@ do { \
#define preempt_enable() do { } while (0)
#define preempt_check_resched() do { } while (0)
+/*
+ * Sometimes we want to increment the preempt count, but we know that it's
+ * already incremented if the kernel is compiled for preemptibility.
+ */
+#define inc_preempt_count_non_preempt() inc_preempt_count()
+#define dec_preempt_count_non_preempt() dec_preempt_count()
+
#endif
#endif /* __LINUX_PREEMPT_H */