patch-2.2.11 linux/include/asm-alpha/mmu_context.h
- Lines: 233
- Date: Mon Aug 9 12:04:41 1999
- Orig file: v2.2.10/linux/include/asm-alpha/mmu_context.h
- Orig date: Sat Jun 12 11:52:52 1999
diff -u --recursive --new-file v2.2.10/linux/include/asm-alpha/mmu_context.h linux/include/asm-alpha/mmu_context.h
@@ -26,11 +26,10 @@
* in use) and the Valid bit set, then entries can also effectively be
* made coherent by assigning a new, unused ASN to the currently
* running process and not reusing the previous ASN before calling the
- * appropriate PALcode routine to invalidate the translation buffer
- * (TB)".
+ * appropriate PALcode routine to invalidate the translation buffer (TB)".
*
* In short, the EV4 has a "kind of" ASN capability, but it doesn't actually
- * work correctly and can thus not be used (explaining the lack of PAL-code
+ * work correctly and can thus not be used (explaining the lack of PALcode
* support).
*/
#define EV4_MAX_ASN 63
@@ -49,44 +48,40 @@
# endif
#endif
-#ifdef __SMP__
-#define WIDTH_THIS_PROCESSOR 5
/*
- * last_asn[processor]:
+ * cpu_last_asn(processor):
* 63 0
* +-------------+----------------+--------------+
* | asn version | this processor | hardware asn |
* +-------------+----------------+--------------+
*/
-extern unsigned long last_asn[];
-#define asn_cache last_asn[p->processor]
+#ifdef __SMP__
+#include <asm/smp.h>
+#define cpu_last_asn(cpuid) (cpu_data[cpuid].last_asn)
#else
-#define WIDTH_THIS_PROCESSOR 0
-/*
- * asn_cache:
- * 63 0
- * +------------------------------+--------------+
- * | asn version | hardware asn |
- * +------------------------------+--------------+
- */
-extern unsigned long asn_cache;
+extern unsigned long last_asn;
+#define cpu_last_asn(cpuid) last_asn
#endif /* __SMP__ */
#define WIDTH_HARDWARE_ASN 8
+#ifdef __SMP__
+#define WIDTH_THIS_PROCESSOR 5
+#else
+#define WIDTH_THIS_PROCESSOR 0
+#endif
#define ASN_FIRST_VERSION (1UL << (WIDTH_THIS_PROCESSOR + WIDTH_HARDWARE_ASN))
#define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1)
/*
* NOTE! The way this is set up, the high bits of the "asn_cache" (and
- * the "mm->context") are the ASN _version_ code. A version of 0 is
- * always considered invalid, so to invalidate another process you only
- * need to do "p->mm->context = 0".
+ * "mm->context") are the ASN _version_ code. A version of 0 is always
+ * considered invalid, so to invalidate another process you only need
+ * to do "p->mm->context = 0".
*
* If we need more ASN's than the processor has, we invalidate the old
* user TLB's (tbiap()) and start a new ASN version. That will automatically
- * force a new asn for any other processes the next time they want to
- * run.
+ * force a new asn for any other processes the next time they want to run.
*/
#ifndef __EXTERN_INLINE
@@ -96,22 +91,59 @@
extern void get_new_mmu_context(struct task_struct *p, struct mm_struct *mm);
-__EXTERN_INLINE void ev4_get_mmu_context(struct task_struct *p)
+static inline unsigned long
+__get_new_mmu_context(void)
{
- /* As described, ASN's are broken. */
+ unsigned long asn = cpu_last_asn(smp_processor_id());
+ unsigned long next = asn + 1;
+
+ /* If we've wrapped, flush the whole user TLB. */
+ if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
+ tbiap();
+ next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
+ }
+ cpu_last_asn(smp_processor_id()) = next;
+ return next;
}
-__EXTERN_INLINE void ev5_get_mmu_context(struct task_struct *p)
+__EXTERN_INLINE void
+ev4_get_mmu_context(struct task_struct *p)
{
- struct mm_struct * mm = p->mm;
+ /* As described, ASN's are broken. But we can optimize for
+ switching between threads -- if the mm is unchanged from
+ current we needn't flush. */
+ /* ??? May not be needed because EV4 PALcode recognizes that
+ ASN's are broken and does a tbiap itself on swpctx, under
+ the "Must set ASN or flush" rule. At least this is true
+ for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
+ I'm going to leave this here anyway, just to Be Sure. -- r~ */
+
+ if (current->mm != p->mm)
+ tbiap();
+}
- if (mm) {
- unsigned long asn = asn_cache;
- /* Check if our ASN is of an older version,
- or on a different CPU, and thus invalid. */
- if ((mm->context ^ asn) & ~HARDWARE_ASN_MASK)
- get_new_mmu_context(p, mm);
+__EXTERN_INLINE void
+ev5_get_mmu_context(struct task_struct *p)
+{
+ /* Check if our ASN is of an older version, or on a different CPU,
+ and thus invalid. */
+ /* ??? If we have two threads on different cpus, we'll continually
+ fight over the context. Find a way to record a per-mm, per-cpu
+ value for the asn. */
+
+ unsigned long asn = cpu_last_asn(smp_processor_id());
+ struct mm_struct *mm = p->mm;
+ unsigned long mmc = mm->context;
+
+ if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
+ mmc = __get_new_mmu_context();
+ mm->context = mmc;
}
+
+ /* Always update the PCB ASN. Another thread may have allocated
+ a new mm->context (via flush_tlb_mm) without the ASN serial
+ number wrapping. We have no way to detect when this is needed. */
+ p->tss.asn = mmc & HARDWARE_ASN_MASK;
}
#ifdef CONFIG_ALPHA_GENERIC
@@ -124,40 +156,40 @@
# endif
#endif
-extern inline void init_new_context(struct mm_struct *mm)
+extern inline void
+init_new_context(struct mm_struct *mm)
{
mm->context = 0;
}
-extern inline void destroy_context(struct mm_struct *mm)
+extern inline void
+destroy_context(struct mm_struct *mm)
{
/* Nothing to do. */
}
+#ifdef __MMU_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __MMU_EXTERN_INLINE
+#endif
/*
* Force a context reload. This is needed when we change the page
* table pointer or when we update the ASN of the current process.
*/
-#if defined(CONFIG_ALPHA_GENERIC)
-#define MASK_CONTEXT(tss) \
- ((struct thread_struct *)((unsigned long)(tss) & alpha_mv.mmu_context_mask))
-#elif defined(CONFIG_ALPHA_DP264)
-#define MASK_CONTEXT(tss) \
- ((struct thread_struct *)((unsigned long)(tss) & 0xfffffffffful))
-#else
-#define MASK_CONTEXT(tss) (tss)
+/* Don't get into trouble with dueling __EXTERN_INLINEs. */
+#ifndef __EXTERN_INLINE
+#include <asm/io.h>
#endif
-__EXTERN_INLINE struct thread_struct *
+extern inline unsigned long
__reload_tss(struct thread_struct *tss)
{
- register struct thread_struct *a0 __asm__("$16");
- register struct thread_struct *v0 __asm__("$0");
-
- a0 = MASK_CONTEXT(tss);
+ register unsigned long a0 __asm__("$16");
+ register unsigned long v0 __asm__("$0");
+ a0 = virt_to_phys(tss);
__asm__ __volatile__(
"call_pal %2 #__reload_tss"
: "=r"(v0), "=r"(a0)
@@ -167,27 +199,22 @@
return v0;
}
-__EXTERN_INLINE void
+extern inline void
reload_context(struct task_struct *task)
{
__reload_tss(&task->tss);
}
/*
- * After we have set current->mm to a new value, this activates the
- * context for the new mm so we see the new mappings.
+ * After setting current->mm to a new value, activate the context for the
+ * new mm so we see the new mappings.
*/
-__EXTERN_INLINE void
+extern inline void
activate_context(struct task_struct *task)
{
- get_mmu_context(task);
+ get_new_mmu_context(task, task->mm);
reload_context(task);
}
-
-#ifdef __MMU_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __MMU_EXTERN_INLINE
-#endif
#endif /* __ALPHA_MMU_CONTEXT_H */
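
For reference, the ASN allocation scheme this patch switches to can be illustrated
outside the kernel. The sketch below is a minimal user-space model, assuming a
single CPU (WIDTH_THIS_PROCESSOR == 0) and an EV5-style MAX_ASN of 127; the
demo_* helpers and the stubbed tbiap() are illustrative only, while the bit
layout and the wrap handling mirror the patch.

/*
 * Minimal user-space sketch of the ASN version scheme used above.
 * Assumptions: one CPU, MAX_ASN == 127 (EV5), tbiap() stubbed out.
 * Only the bit layout and wrap behaviour follow the patch; the
 * surrounding scaffolding is made up for the demonstration.
 */
#include <stdio.h>

#define WIDTH_HARDWARE_ASN  8
#define ASN_FIRST_VERSION   (1UL << WIDTH_HARDWARE_ASN)
#define HARDWARE_ASN_MASK   ((1UL << WIDTH_HARDWARE_ASN) - 1)
#define MAX_ASN             127     /* EV5_MAX_ASN in the header */

static unsigned long last_asn = ASN_FIRST_VERSION;

static void tbiap(void)             /* stand-in for the PALcode TLB flush */
{
	printf("  tbiap(): flush user TLB, start a new ASN version\n");
}

/* Mirrors __get_new_mmu_context(): bump the ASN, flush and bump the
   version when the hardware ASN space is exhausted. */
static unsigned long demo_get_new_mmu_context(void)
{
	unsigned long asn = last_asn;
	unsigned long next = asn + 1;

	if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
		tbiap();
		next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
	}
	return last_asn = next;
}

int main(void)
{
	unsigned long mm_context = 0;   /* 0 == invalid, forces reallocation */
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long asn = last_asn;

		/* As in ev5_get_mmu_context(): a version mismatch in the
		   high bits means our ASN is stale. */
		if ((mm_context ^ asn) & ~HARDWARE_ASN_MASK)
			mm_context = demo_get_new_mmu_context();

		printf("context %#lx -> hardware ASN %lu, version %lu\n",
		       mm_context, mm_context & HARDWARE_ASN_MASK,
		       mm_context >> WIDTH_HARDWARE_ASN);

		/* Pretend 200 more contexts were handed out, forcing a wrap. */
		last_asn += 200;
		mm_context = 0;
	}
	return 0;
}

Run on its own, this hands out ASN 1 under version 1, then wraps past MAX_ASN,
flushes, and restarts the hardware ASN space under version 2, which is exactly
the "invalidate by starting a new version" behaviour the comment block in the
header describes.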