x86: remove open-coded save/load segment operations
author Jeremy Fitzhardinge <jeremy@goop.org>
Wed, 25 Jun 2008 04:19:00 +0000 (00:19 -0400)
committer Ingo Molnar <mingo@elte.hu>
Tue, 8 Jul 2008 11:10:25 +0000 (13:10 +0200)
This removes a pile of buggy open-coded implementations of savesegment
and loadsegment.

(They are buggy because they don't have memory barriers to prevent
them from being reordered with respect to memory accesses.)
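
For illustration, a minimal user-space sketch of the difference, assuming
simplified macro bodies (the kernel's real loadsegment() additionally
carries exception-fixup code for faulting selectors):

/* sketch.c: illustrative stand-ins only, not the kernel definitions. */

/* Open-coded style being removed: no clobber, so the compiler may
 * hoist or sink surrounding loads and stores across the segment move. */
#define savesegment_open_coded(seg, value) \
	asm("mov %%" #seg ",%0" : "=m" (value))

/* Macro style: the "memory" clobber acts as a compiler barrier, keeping
 * memory accesses ordered with respect to the segment register access. */
#define savesegment(seg, value) \
	asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

#define loadsegment(seg, value) \
	asm volatile("movl %k0,%%" #seg :: "r" (value) : "memory")

int main(void)
{
	unsigned int sel;

	savesegment(fs, sel);	/* read %fs; nearby memory accesses stay ordered */
	loadsegment(fs, sel);	/* write the same selector back */
	return 0;
}

This is a compiler-level ordering guarantee only, but it is exactly what
the open-coded asm statements in the hunks below were missing.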

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: xen-devel <xen-devel@lists.xensource.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/common_64.c
arch/x86/kernel/process_64.c

diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index 39eaefbcec065b9dd3ca986cb43e596b937e90f0..751850235291c2663aa930a33c5cbda2e123b1e0 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -480,7 +480,8 @@ void pda_init(int cpu)
        struct x8664_pda *pda = cpu_pda(cpu);
 
        /* Setup up data that may be needed in __get_free_pages early */
-       asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
+       loadsegment(fs, 0);
+       loadsegment(gs, 0);
        /* Memory clobbers used to order PDA accessed */
        mb();
        wrmsrl(MSR_GS_BASE, pda);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 290183e9731a2c45e25d0855533d8a7bea69e74c..ddc6fcc73dc66a95da0a6f8f43beb29f529e8c23 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -335,10 +335,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
        p->thread.fs = me->thread.fs;
        p->thread.gs = me->thread.gs;
 
-       asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-       asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-       asm("mov %%es,%0" : "=m" (p->thread.es));
-       asm("mov %%ds,%0" : "=m" (p->thread.ds));
+       savesegment(gs, p->thread.gsindex);
+       savesegment(fs, p->thread.fsindex);
+       savesegment(es, p->thread.es);
+       savesegment(ds, p->thread.ds);
 
        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
@@ -377,7 +377,9 @@ out:
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-       asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0));
+       loadsegment(fs, 0);
+       loadsegment(es, 0);
+       loadsegment(ds, 0);
        load_gs_index(0);
        regs->ip                = new_ip;
        regs->sp                = new_sp;
@@ -550,11 +552,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
-       asm volatile("mov %%es,%0" : "=m" (prev->es));
+       savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es); 
-       
-       asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
+
+       savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);
 
@@ -565,7 +567,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         */
        { 
                unsigned fsindex;
-               asm volatile("movl %%fs,%0" : "=r" (fsindex)); 
+               savesegment(fs, fsindex);
                /* segment register != 0 always requires a reload. 
                   also reload when it has changed. 
                   when prev process used 64bit base always reload
@@ -586,7 +588,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        }
        { 
                unsigned gsindex;
-               asm volatile("movl %%gs,%0" : "=r" (gsindex)); 
+               savesegment(gs, gsindex);
                if (unlikely(gsindex | next->gsindex | prev->gs)) {
                        load_gs_index(next->gsindex);
                        if (gsindex)
@@ -767,7 +769,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
-                               asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
+                               loadsegment(fs, FS_TLS_SEL);
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
@@ -777,7 +779,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                        if (doit) {
                                /* set the selector to 0 to not confuse
                                   __switch_to */
-                               asm volatile("movl %0,%%fs" :: "r" (0));
+                               loadsegment(fs, 0);
                                ret = checking_wrmsrl(MSR_FS_BASE, addr);
                        }
                }
@@ -800,7 +802,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit) {
-                       asm("movl %%gs,%0" : "=r" (gsindex));
+                       savesegment(gs, gsindex);
                        if (gsindex)
                                rdmsrl(MSR_KERNEL_GS_BASE, base);
                        else