x86: save %fs and %gs before load_TLS() and arch_leave_lazy_cpu_mode()
author Jeremy Fitzhardinge <jeremy@goop.org>
Wed, 25 Jun 2008 04:19:24 +0000 (00:19 -0400)
committer Ingo Molnar <mingo@elte.hu>
Tue, 8 Jul 2008 11:11:11 +0000 (13:11 +0200)
We must save %fs and %gs before load_TLS() is called, because load_TLS()
may need to clear %fs and %gs (e.g. under Xen); a mock sketch of this
ordering hazard follows the sign-off list below.

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: xen-devel <xen-devel@lists.xensource.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
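
The diff below hoists the savesegment() calls for %fs and %gs in
__switch_to() so they run before load_TLS(). To illustrate why the
ordering matters, here is a minimal user-space mock, not kernel code:
the names mock_fs, mock_gs, mock_load_tls(), switch_wrong() and
switch_fixed() are invented for this sketch, and mock_load_tls() simply
zeroes the pretend selectors the way load_TLS() (e.g. xen_load_tls())
may clear the real %fs and %gs while rewriting the TLS descriptors.

#include <stdio.h>

static unsigned mock_fs = 0x33;  /* pretend %fs selector of the previous task */
static unsigned mock_gs = 0x2b;  /* pretend %gs selector of the previous task */

/* Stand-in for load_TLS(): under Xen, xen_load_tls() may clear %fs and %gs
 * while it updates the TLS descriptors, so this mock just zeroes them. */
static void mock_load_tls(void)
{
	mock_fs = 0;
	mock_gs = 0;
}

/* Old ordering: save after load_TLS() -- the live selectors are already gone. */
static void switch_wrong(void)
{
	mock_load_tls();
	unsigned fsindex = mock_fs;	/* reads 0, not prev's selector */
	unsigned gsindex = mock_gs;
	printf("wrong order: fsindex=%#x gsindex=%#x\n", fsindex, gsindex);
}

/* New ordering (as in this patch): save first, then call load_TLS(). */
static void switch_fixed(void)
{
	unsigned fsindex = mock_fs;	/* models savesegment(fs, fsindex) */
	unsigned gsindex = mock_gs;	/* models savesegment(gs, gsindex) */
	mock_load_tls();
	printf("fixed order: fsindex=%#x gsindex=%#x\n", fsindex, gsindex);
}

int main(void)
{
	switch_fixed();			/* prints 0x33/0x2b */
	mock_fs = 0x33;
	mock_gs = 0x2b;			/* reset the pretend state */
	switch_wrong();			/* prints 0/0 */
	return 0;
}

With the wrong ordering, the zeroed values are what __switch_to() would
later store into prev->fsindex and prev->gsindex, losing the previous
task's selectors; saving before load_TLS() preserves them.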
arch/x86/kernel/process_64.c

index 488eaca47bd8c7a2842be634edbdfc423c13b9aa..db5eb963e4df2e7d573012f96bc3696eff6b52ca 100644
@@ -538,6 +538,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                                 *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
+       unsigned fsindex, gsindex;
 
        /* we're going to use this soon, after a few expensive things */
        if (next_p->fpu_counter>5)
@@ -560,6 +561,15 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);
 
+
+       /* We must save %fs and %gs before load_TLS() because
+        * %fs and %gs may be cleared by load_TLS().
+        *
+        * (e.g. xen_load_tls())
+        */
+       savesegment(fs, fsindex);
+       savesegment(gs, gsindex);
+
        load_TLS(next, cpu);
 
        /*
@@ -575,8 +585,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * Switch FS and GS.
         */
        { 
-               unsigned fsindex;
-               savesegment(fs, fsindex);
                /* segment register != 0 always requires a reload. 
                   also reload when it has changed. 
                   when prev process used 64bit base always reload
@@ -594,10 +602,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                if (next->fs) 
                        wrmsrl(MSR_FS_BASE, next->fs); 
                prev->fsindex = fsindex;
-       }
-       { 
-               unsigned gsindex;
-               savesegment(gs, gsindex);
+
                if (unlikely(gsindex | next->gsindex | prev->gs)) {
                        load_gs_index(next->gsindex);
                        if (gsindex)