sgi-gru: support multiple pagesizes in GRU
author Jack Steiner <steiner@sgi.com>
Thu, 2 Apr 2009 23:59:12 +0000 (16:59 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 3 Apr 2009 02:05:07 +0000 (19:05 -0700)
Add multiple pagesize support to the GRU driver.
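The per-thread ts_sizeavail field is a bitmask with one bit per supported
page shift.  When a TLB dropin sees a page size whose bit is not yet set,
the bit is added to the mask and the CCH is reloaded so the hardware picks
up the new sizeavail value.  A minimal standalone sketch of that
bookkeeping follows; the GRU_SIZEAVAIL() definition below is assumed for
illustration only and is not taken verbatim from the driver headers.

	#include <stdio.h>

	/* assumed illustration: one bit per page shift above the 4K base */
	#define GRU_SIZEAVAIL(pshift)	(1UL << ((pshift) - 12))

	int main(void)
	{
		/* context starts out allowing only base pages (PAGE_SHIFT == 12 here) */
		unsigned short ts_sizeavail = GRU_SIZEAVAIL(12);
		int pageshift = 21;		/* a fault on a 2MB page */

		/* mirrors the gru_try_dropin() check: new size -> update mask, reload CCH */
		if (!(ts_sizeavail & GRU_SIZEAVAIL(pageshift)))
			ts_sizeavail |= GRU_SIZEAVAIL(pageshift);

		printf("sizeavail mask: 0x%x\n", ts_sizeavail);	/* prints 0x201 */
		return 0;
	}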

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/misc/sgi-gru/grufault.c
drivers/misc/sgi-gru/gruhandles.c
drivers/misc/sgi-gru/gruhandles.h
drivers/misc/sgi-gru/grukservices.c
drivers/misc/sgi-gru/grumain.c
drivers/misc/sgi-gru/grutables.h

drivers/misc/sgi-gru/grufault.c
index 15507d31e62bd5c467c52034796d0418adce154b..ab118558552e96a3fea1071f406b47d88514e905 100644
@@ -360,6 +360,13 @@ static int gru_try_dropin(struct gru_thread_state *gts,
        if (ret == -2)
                goto failupm;
 
+       if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
+               gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
+               if (atomic || !gru_update_cch(gts, 0)) {
+                       gts->ts_force_cch_reload = 1;
+                       goto failupm;
+               }
+       }
        gru_cb_set_istatus_active(cb);
        tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
                          GRU_PAGESIZE(pageshift));
@@ -535,6 +542,14 @@ int gru_handle_user_call_os(unsigned long cb)
                gts->ts_force_unload = 1;
        }
 
+       /*
+        * CCH may contain stale data if ts_force_cch_reload is set.
+        */
+       if (gts->ts_gru && gts->ts_force_cch_reload) {
+               gru_update_cch(gts, 0);
+               gts->ts_force_cch_reload = 0;
+       }
+
        ret = -EAGAIN;
        cbrnum = thread_cbr_number(gts, ucbnum);
        if (gts->ts_force_unload) {
drivers/misc/sgi-gru/gruhandles.c
index 9dcf5d8966838e5cc605aff8f273083b322d5f45..9b7ccb328697654e4770235a3531a25445988a70 100644
@@ -72,42 +72,16 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
        return status;
 }
 
-#if defined CONFIG_IA64
-static void cch_allocate_set_asids(
-                 struct gru_context_configuration_handle *cch, int asidval)
+int cch_allocate(struct gru_context_configuration_handle *cch,
+               int asidval, int sizeavail, unsigned long cbrmap,
+               unsigned long dsrmap)
 {
        int i;
 
        for (i = 0; i < 8; i++) {
                cch->asid[i] = (asidval++);
-#if 0
-               /* ZZZ hugepages not supported yet */
-               if (i == RGN_HPAGE)
-                       cch->sizeavail[i] = GRU_SIZEAVAIL(hpage_shift);
-               else
-#endif
-                       cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT);
+               cch->sizeavail[i] = sizeavail;
        }
-}
-#elif defined CONFIG_X86_64
-static void cch_allocate_set_asids(
-                 struct gru_context_configuration_handle *cch, int asidval)
-{
-       int i;
-
-       for (i = 0; i < 8; i++) {
-               cch->asid[i] = asidval++;
-               cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT) |
-                       GRU_SIZEAVAIL(21);
-       }
-}
-#endif
-
-int cch_allocate(struct gru_context_configuration_handle *cch,
-                              int asidval, unsigned long cbrmap,
-                              unsigned long dsrmap)
-{
-       cch_allocate_set_asids(cch, asidval);
        cch->dsr_allocation_map = dsrmap;
        cch->cbr_allocation_map = cbrmap;
        cch->opc = CCHOP_ALLOCATE;
drivers/misc/sgi-gru/gruhandles.h
index dfce8ea5da243c70b4d0ea777bf486388b802fa2..1ed74d7508c8bb14181c181a07925d04041c6283 100644
@@ -496,7 +496,7 @@ enum gru_cbr_state {
 #define GRUMAXINVAL            1024UL
 
 int cch_allocate(struct gru_context_configuration_handle *cch,
-       int asidval, unsigned long cbrmap, unsigned long dsrmap);
+       int asidval, int sizeavail, unsigned long cbrmap, unsigned long dsrmap);
 
 int cch_start(struct gru_context_configuration_handle *cch);
 int cch_interrupt(struct gru_context_configuration_handle *cch);
drivers/misc/sgi-gru/grukservices.c
index 6b1efe4472741a82b95809e0bfb2236db7aa16d4..d8bd7d84a7cf7117ca1e3f409b718e16a8ba1151 100644
@@ -672,7 +672,7 @@ int gru_kservices_init(struct gru_state *gru)
        cch->tlb_int_enable = 0;
        cch->tfm_done_bit_enable = 0;
        cch->unmap_enable = 1;
-       err = cch_allocate(cch, 0, cbr_map, dsr_map);
+       err = cch_allocate(cch, 0, 0, cbr_map, dsr_map);
        if (err) {
                gru_dbg(grudev,
                        "Unable to allocate kernel CCH: gid %d, err %d\n",
drivers/misc/sgi-gru/grumain.c
index 5fc7b5ecde66cf2696fc5e6728eb1b63e152fc24..ec3f7a17d221e01bd82bc23b6a64025263627c65 100644
@@ -326,6 +326,7 @@ static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
        gts->ts_vma = vma;
        gts->ts_tlb_int_select = -1;
        gts->ts_gms = gru_register_mmu_notifier();
+       gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
        if (!gts->ts_gms)
                goto err;
 
@@ -552,7 +553,8 @@ static void gru_load_context(struct gru_thread_state *gts)
                cch->tlb_int_select = gts->ts_tlb_int_select;
        }
        cch->tfm_done_bit_enable = 0;
-       err = cch_allocate(cch, asid, gts->ts_cbr_map, gts->ts_dsr_map);
+       err = cch_allocate(cch, asid, gts->ts_sizeavail, gts->ts_cbr_map,
+                               gts->ts_dsr_map);
        if (err) {
                gru_dbg(grudev,
                        "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
@@ -573,11 +575,12 @@ static void gru_load_context(struct gru_thread_state *gts)
 /*
  * Update fields in an active CCH:
  *     - retarget interrupts on local blade
+ *     - update sizeavail mask
  *     - force a delayed context unload by clearing the CCH asids. This
  *       forces TLB misses for new GRU instructions. The context is unloaded
  *       when the next TLB miss occurs.
  */
-static int gru_update_cch(struct gru_thread_state *gts, int int_select)
+int gru_update_cch(struct gru_thread_state *gts, int force_unload)
 {
        struct gru_context_configuration_handle *cch;
        struct gru_state *gru = gts->ts_gru;
@@ -591,9 +594,11 @@ static int gru_update_cch(struct gru_thread_state *gts, int int_select)
                        goto exit;
                if (cch_interrupt(cch))
                        BUG();
-               if (int_select >= 0) {
-                       gts->ts_tlb_int_select = int_select;
-                       cch->tlb_int_select = int_select;
+               if (!force_unload) {
+                       for (i = 0; i < 8; i++)
+                               cch->sizeavail[i] = gts->ts_sizeavail;
+                       gts->ts_tlb_int_select = gru_cpu_fault_map_id();
+                       cch->tlb_int_select = gru_cpu_fault_map_id();
                } else {
                        for (i = 0; i < 8; i++)
                                cch->asid[i] = 0;
@@ -625,7 +630,7 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
 
        gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
                gru_cpu_fault_map_id());
-       return gru_update_cch(gts, gru_cpu_fault_map_id());
+       return gru_update_cch(gts, 0);
 }
 
 
drivers/misc/sgi-gru/grutables.h
index 8e5c8e5bb5338211c5e2dc7492f3899cf5ee8a95..bf1eeb7553edda963bfea5b1b7f8de28cccf0b28 100644
@@ -361,6 +361,7 @@ struct gru_thread_state {
        long                    ts_user_options;/* misc user option flags */
        pid_t                   ts_tgid_owner;  /* task that is using the
                                                   context - for migration */
+       unsigned short          ts_sizeavail;   /* Pagesizes in use */
        int                     ts_tsid;        /* thread that owns the
                                                   structure */
        int                     ts_tlb_int_select;/* target cpu if interrupts
@@ -374,6 +375,7 @@ struct gru_thread_state {
                                                   required for contest */
        char                    ts_blade;       /* If >= 0, migrate context if
                                                   ref from diferent blade */
+       char                    ts_force_cch_reload;
        char                    ts_force_unload;/* force context to be unloaded
                                                   after migration */
        char                    ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
@@ -597,6 +599,7 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
 extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
                                *vma, int tsid);
 extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
+extern int gru_update_cch(struct gru_thread_state *gts, int force_unload);
 extern void gts_drop(struct gru_thread_state *gts);
 extern void gru_tgh_flush_init(struct gru_state *gru);
 extern int gru_kservices_init(struct gru_state *gru);