IB/ipath: Make send buffers available for kernel if not allocated to user
author    Ralph Campbell <ralph.campbell@qlogic.com>
          Thu, 17 Apr 2008 04:09:26 +0000 (21:09 -0700)
committer Roland Dreier <rolandd@cisco.com>
          Thu, 17 Apr 2008 04:09:26 +0000 (21:09 -0700)
A fixed partitioning of send buffers between user processes and kernel
use is determined at driver load time.  Since send buffers are a scarce
resource, it makes sense to allow the kernel to use the buffers when
they are not allocated to a user process.

Also, eliminate code duplication for ipath_force_pio_avail_update().

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
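
The bookkeeping behind this change: ipath_pioavailshadow keeps two bits
per send buffer (a generation bit at position 2*i and a busy bit at
2*i+1, per INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT), and the new
ipath_pioavailkernel bitmap marks, at the even positions, which buffers
the kernel may hand out.  The standalone sketch below models what
ipath_chg_pioavailkernel() does to those two bitmaps; the buffer count
and the printing are illustrative, not part of the driver.

#include <stdio.h>

#define NBUFS      16  /* illustrative; the driver sizes this from the chip */
#define BUSY_SHIFT 1   /* busy bit sits one above the generation bit */

static unsigned long shadow;     /* 2 bits per buffer: gen at 2*i, busy at 2*i+1 */
static unsigned long kernelmask; /* bit 2*i set if buffer i is kernel-usable */

/* Userspace model of ipath_chg_pioavailkernel(): move buffers
 * [start, start+len) to kernel use (avail=1) or user use (avail=0). */
static void chg_pioavailkernel(unsigned start, unsigned len, int avail)
{
	unsigned i;

	for (i = start; i < start + len; i++) {
		if (avail) {
			shadow &= ~(1UL << (2 * i + BUSY_SHIFT));
			kernelmask |= 1UL << (2 * i);
		} else {
			shadow |= 1UL << (2 * i + BUSY_SHIFT);
			kernelmask &= ~(1UL << (2 * i));
		}
	}
}

int main(void)
{
	chg_pioavailkernel(0, NBUFS, 1); /* init: everything kernel-usable */
	chg_pioavailkernel(4, 4, 0);     /* a user port claims buffers 4..7 */
	printf("shadow     0x%08lx\n", shadow);
	printf("kernelmask 0x%08lx\n", kernelmask);
	return 0;
}

Forcing the busy bit in the shadow is what keeps the allocator from ever
handing a user-owned buffer to the kernel, no matter what the hardware's
in-memory availability copy says; kernelmask is what the scan-and-update
paths below consult in place of the old _IPATH_ALL_CHECKBITS constant.
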
drivers/infiniband/hw/ipath/ipath_diag.c
drivers/infiniband/hw/ipath/ipath_driver.c
drivers/infiniband/hw/ipath/ipath_file_ops.c
drivers/infiniband/hw/ipath/ipath_init_chip.c
drivers/infiniband/hw/ipath/ipath_intr.c
drivers/infiniband/hw/ipath/ipath_kernel.h
drivers/infiniband/hw/ipath/ipath_registers.h
drivers/infiniband/hw/ipath/ipath_verbs.c

diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 96a1c4172f87cd58c1858089302a4ab28eba31c9..af59bf380ca265375e5605c24f75fa058f38785f 100644
@@ -439,7 +439,9 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
                goto bail;
        }
 
-       piobuf = ipath_getpiobuf(dd, &pbufn);
+       plen >>= 2;             /* in dwords */
+
+       piobuf = ipath_getpiobuf(dd, plen, &pbufn);
        if (!piobuf) {
                ipath_cdbg(VERBOSE, "No PIO buffers avail unit for %u\n",
                           dd->ipath_unit);
@@ -449,8 +451,6 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
        /* disarm it just to be extra sure */
        ipath_disarm_piobufs(dd, pbufn, 1);
 
-       plen >>= 2;             /* in dwords */
-
        if (ipath_debug & __IPATH_PKTDBG)
                ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
                           dd->ipath_unit, plen - 1, pbufn);
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 1299171c1e5024456d01d5e75f42640ed28251fb..216da97eddd46c0b326cf7e5a86b5e988acde53b 100644
@@ -317,7 +317,7 @@ static void ipath_verify_pioperf(struct ipath_devdata *dd)
        u32 *addr;
        u64 msecs, emsecs;
 
-       piobuf = ipath_getpiobuf(dd, &pbnum);
+       piobuf = ipath_getpiobuf(dd, 0, &pbnum);
        if (!piobuf) {
                dev_info(&dd->pcidev->dev,
                        "No PIObufs for checking perf, skipping\n");
@@ -836,20 +836,8 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
                ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
                spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
        }
-
-       /*
-        * Disable PIOAVAILUPD, then re-enable, reading scratch in
-        * between.  This seems to avoid a chip timing race that causes
-        * pioavail updates to memory to stop.  We xor as we don't
-        * know the state of the bit when we're called.
-        */
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-               dd->ipath_sendctrl ^ INFINIPATH_S_PIOBUFAVAILUPD);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                        dd->ipath_sendctrl);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+       /* on some older chips, update may not happen after cancel */
+       ipath_force_pio_avail_update(dd);
 }
 
 /**
@@ -1314,7 +1302,6 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd)
         * happens when all buffers are in use, so only cpu overhead, not
         * latency or bandwidth is affected.
         */
-#define _IPATH_ALL_CHECKBITS 0x5555555555555555ULL
        if (!dd->ipath_pioavailregs_dma) {
                ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
                return;
@@ -1359,7 +1346,7 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd)
                        piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
                else
                        piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
-               pchg = _IPATH_ALL_CHECKBITS &
+               pchg = dd->ipath_pioavailkernel[i] &
                        ~(dd->ipath_pioavailshadow[i] ^ piov);
                pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
                if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
@@ -1410,27 +1397,63 @@ int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
        return ret;
 }
 
-/**
- * ipath_getpiobuf - find an available pio buffer
- * @dd: the infinipath device
- * @pbufnum: the buffer number is placed here
+/*
+ * debugging code and stats updates if no pio buffers are available.
+ */
+static noinline void no_pio_bufs(struct ipath_devdata *dd)
+{
+       unsigned long *shadow = dd->ipath_pioavailshadow;
+       __le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;
+
+       dd->ipath_upd_pio_shadow = 1;
+
+       /*
+        * not atomic, but if we lose a stat count once in a while, that's OK
+        */
+       ipath_stats.sps_nopiobufs++;
+       if (!(++dd->ipath_consec_nopiobuf % 100000)) {
+               ipath_dbg("%u pio sends with no bufavail; dmacopy: "
+                       "%llx %llx %llx %llx; shadow:  %lx %lx %lx %lx\n",
+                       dd->ipath_consec_nopiobuf,
+                       (unsigned long long) le64_to_cpu(dma[0]),
+                       (unsigned long long) le64_to_cpu(dma[1]),
+                       (unsigned long long) le64_to_cpu(dma[2]),
+                       (unsigned long long) le64_to_cpu(dma[3]),
+                       shadow[0], shadow[1], shadow[2], shadow[3]);
+               /*
+                * 4 buffers per byte, 4 registers above, cover rest
+                * below
+                */
+               if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
+                   (sizeof(shadow[0]) * 4 * 4))
+                       ipath_dbg("2nd group: dmacopy: %llx %llx "
+                                 "%llx %llx; shadow: %lx %lx %lx %lx\n",
+                                 (unsigned long long)le64_to_cpu(dma[4]),
+                                 (unsigned long long)le64_to_cpu(dma[5]),
+                                 (unsigned long long)le64_to_cpu(dma[6]),
+                                 (unsigned long long)le64_to_cpu(dma[7]),
+                                 shadow[4], shadow[5], shadow[6],
+                                 shadow[7]);
+       }
+}
+
+/*
+ * common code for normal driver pio buffer allocation and reserved
+ * allocation.
  *
  * do appropriate marking as busy, etc.
  * returns buffer number if one found (>=0), negative number is error.
- * Used by ipath_layer_send
  */
-u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
+static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
+       u32 *pbufnum, u32 first, u32 last, u32 firsti)
 {
-       int i, j, starti, updated = 0;
-       unsigned piobcnt, iter;
+       int i, j, updated = 0;
+       unsigned piobcnt;
        unsigned long flags;
        unsigned long *shadow = dd->ipath_pioavailshadow;
        u32 __iomem *buf;
 
-       piobcnt = (unsigned)(dd->ipath_piobcnt2k
-                            + dd->ipath_piobcnt4k);
-       starti = dd->ipath_lastport_piobuf;
-       iter = piobcnt - starti;
+       piobcnt = last - first;
        if (dd->ipath_upd_pio_shadow) {
                /*
                 * Minor optimization.  If we had no buffers on last call,
@@ -1438,12 +1461,10 @@ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
                 * if no buffers were updated, to be paranoid
                 */
                ipath_update_pio_bufs(dd);
-               /* we scanned here, don't do it at end of scan */
-               updated = 1;
-               i = starti;
+               updated++;
+               i = first;
        } else
-               i = dd->ipath_lastpioindex;
-
+               i = firsti;
 rescan:
        /*
         * while test_and_set_bit() is atomic, we do that and then the
         * change_bit(), and the pair is not.  See if this is the cause
         * of the remaining armlaunch errors.
         * of the remaining armlaunch errors.
         */
        spin_lock_irqsave(&ipath_pioavail_lock, flags);
-       for (j = 0; j < iter; j++, i++) {
-               if (i >= piobcnt)
-                       i = starti;
-               /*
-                * To avoid bus lock overhead, we first find a candidate
-                * buffer, then do the test and set, and continue if that
-                * fails.
-                */
-               if (test_bit((2 * i) + 1, shadow) ||
-                   test_and_set_bit((2 * i) + 1, shadow))
+       for (j = 0; j < piobcnt; j++, i++) {
+               if (i >= last)
+                       i = first;
+               if (__test_and_set_bit((2 * i) + 1, shadow))
                        continue;
                /* flip generation bit */
-               change_bit(2 * i, shadow);
+               __change_bit(2 * i, shadow);
                break;
        }
        spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
 
-       if (j == iter) {
-               volatile __le64 *dma = dd->ipath_pioavailregs_dma;
-
-               /*
-                * first time through; shadow exhausted, but may be real
-                * buffers available, so go see; if any updated, rescan
-                * (once)
-                */
+       if (j == piobcnt) {
                if (!updated) {
+                       /*
+                        * first time through; shadow exhausted, but there may
+                        * be buffers available; try an update and then rescan.
+                        */
                        ipath_update_pio_bufs(dd);
-                       updated = 1;
-                       i = starti;
+                       updated++;
+                       i = first;
                        goto rescan;
-               }
-               dd->ipath_upd_pio_shadow = 1;
-               /*
-                * not atomic, but if we lose one once in a while, that's OK
-                */
-               ipath_stats.sps_nopiobufs++;
-               if (!(++dd->ipath_consec_nopiobuf % 100000)) {
-                       ipath_dbg(
-                               "%u pio sends with no bufavail; dmacopy: "
-                               "%llx %llx %llx %llx; shadow:  "
-                               "%lx %lx %lx %lx\n",
-                               dd->ipath_consec_nopiobuf,
-                               (unsigned long long) le64_to_cpu(dma[0]),
-                               (unsigned long long) le64_to_cpu(dma[1]),
-                               (unsigned long long) le64_to_cpu(dma[2]),
-                               (unsigned long long) le64_to_cpu(dma[3]),
-                               shadow[0], shadow[1], shadow[2],
-                               shadow[3]);
+               } else if (updated == 1 && piobcnt <=
+                       ((dd->ipath_sendctrl
+                       >> INFINIPATH_S_UPDTHRESH_SHIFT) &
+                       INFINIPATH_S_UPDTHRESH_MASK)) {
                        /*
-                        * 4 buffers per byte, 4 registers above, cover rest
-                        * below
+                        * for chips supporting and using the update
+                        * threshold, we need to force an update of the
+                        * in-memory copy if the count is less than the
+                        * threshold, then check one more time.
                         */
-                       if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
-                           (sizeof(shadow[0]) * 4 * 4))
-                               ipath_dbg("2nd group: dmacopy: %llx %llx "
-                                         "%llx %llx; shadow: %lx %lx "
-                                         "%lx %lx\n",
-                                         (unsigned long long)
-                                         le64_to_cpu(dma[4]),
-                                         (unsigned long long)
-                                         le64_to_cpu(dma[5]),
-                                         (unsigned long long)
-                                         le64_to_cpu(dma[6]),
-                                         (unsigned long long)
-                                         le64_to_cpu(dma[7]),
-                                         shadow[4], shadow[5],
-                                         shadow[6], shadow[7]);
+                       ipath_force_pio_avail_update(dd);
+                       ipath_update_pio_bufs(dd);
+                       updated++;
+                       i = first;
+                       goto rescan;
                }
+
+               no_pio_bufs(dd);
                buf = NULL;
-               goto bail;
+       } else {
+               if (i < dd->ipath_piobcnt2k)
+                       buf = (u32 __iomem *) (dd->ipath_pio2kbase +
+                                              i * dd->ipath_palign);
+               else
+                       buf = (u32 __iomem *)
+                               (dd->ipath_pio4kbase +
+                                (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
+               if (pbufnum)
+                       *pbufnum = i;
        }
 
-       /*
-        * set next starting place.  Since it's just an optimization,
-        * it doesn't matter who wins on this, so no locking
-        */
-       dd->ipath_lastpioindex = i + 1;
-       if (dd->ipath_upd_pio_shadow)
-               dd->ipath_upd_pio_shadow = 0;
-       if (dd->ipath_consec_nopiobuf)
-               dd->ipath_consec_nopiobuf = 0;
-       if (i < dd->ipath_piobcnt2k)
-               buf = (u32 __iomem *) (dd->ipath_pio2kbase +
-                                      i * dd->ipath_palign);
-       else
-               buf = (u32 __iomem *)
-                       (dd->ipath_pio4kbase +
-                        (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
-       ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
-                  i, (i < dd->ipath_piobcnt2k) ? 2 : 4, buf);
-       if (pbufnum)
-               *pbufnum = i;
+       return buf;
+}
 
-bail:
+/**
+ * ipath_getpiobuf - find an available pio buffer
+ * @dd: the infinipath device
+ * @plen: the size of the PIO buffer needed in 32-bit words
+ * @pbufnum: the buffer number is placed here
+ */
+u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
+{
+       u32 __iomem *buf;
+       u32 pnum, nbufs;
+       u32 first, lasti;
+
+       if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
+               first = dd->ipath_piobcnt2k;
+               lasti = dd->ipath_lastpioindexl;
+       } else {
+               first = 0;
+               lasti = dd->ipath_lastpioindex;
+       }
+       nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
+       buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);
+
+       if (buf) {
+               /*
+                * Set next starting place.  It's just an optimization,
+                * it doesn't matter who wins on this, so no locking
+                */
+               if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
+                       dd->ipath_lastpioindexl = pnum + 1;
+               else
+                       dd->ipath_lastpioindex = pnum + 1;
+               if (dd->ipath_upd_pio_shadow)
+                       dd->ipath_upd_pio_shadow = 0;
+               if (dd->ipath_consec_nopiobuf)
+                       dd->ipath_consec_nopiobuf = 0;
+               ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
+                          pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
+               if (pbufnum)
+                       *pbufnum = pnum;
+
+       }
        return buf;
 }
 
+/**
+ * ipath_chg_pioavailkernel - change which send buffers are available for kernel
+ * @dd: the infinipath device
+ * @start: the starting send buffer number
+ * @len: the number of send buffers
+ * @avail: true if the buffers are available for kernel use, false otherwise
+ */
+void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
+                             unsigned len, int avail)
+{
+       unsigned long flags;
+       unsigned end;
+
+       /* There are two bits per send buffer (busy and generation) */
+       start *= 2;
+       len *= 2;
+       end = start + len;
+
+       /* Set or clear the generation bits. */
+       spin_lock_irqsave(&ipath_pioavail_lock, flags);
+       while (start < end) {
+               if (avail) {
+                       __clear_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
+                               dd->ipath_pioavailshadow);
+                       __set_bit(start, dd->ipath_pioavailkernel);
+               } else {
+                       __set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
+                               dd->ipath_pioavailshadow);
+                       __clear_bit(start, dd->ipath_pioavailkernel);
+               }
+               start += 2;
+       }
+       spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
+}
+
 /**
  * ipath_create_rcvhdrq - create a receive header queue
  * @dd: the infinipath device
@@ -1664,6 +1722,30 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 }
 
+/*
+ * Force an update of the in-memory copy of the pioavail registers, when
+ * needed for any of a variety of reasons.  We read the scratch register
+ * to make it highly likely that the update will have happened by the
+ * time we return.  If updates are already off (as in cancel_sends
+ * above), this routine is a nop, on the assumption that the caller
+ * will "do the right thing".
+ */
+void ipath_force_pio_avail_update(struct ipath_devdata *dd)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+       if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
+               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+                       dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
+               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+                       dd->ipath_sendctrl);
+               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       }
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+}
+
 static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
                                int linitcmd)
 {
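
ipath_getpiobuf() now takes the packet length in dwords so it can steer
large packets to the 4K buffers while small packets may land anywhere,
with a separate search rotor per case (ipath_lastpioindex and the new
ipath_lastpioindexl).  A minimal model of that range selection follows;
the buffer counts and the 2K buffer size are made-up stand-ins for the
chip-derived values.

#include <stdio.h>

#define PIOBCNT2K       64            /* illustrative 2K-buffer count */
#define PIOBCNT4K       16            /* illustrative 4K-buffer count */
#define SMALLBUF_DWORDS (2048 >> 2)   /* dwords that fit a 2K buffer */

static unsigned lastpioindex;                /* rotor for small packets */
static unsigned lastpioindexl = PIOBCNT2K;   /* rotor for large packets */

/* Model of the range selection at the top of ipath_getpiobuf():
 * large packets are confined to the 4K buffers, small packets may
 * use any buffer. */
static void select_range(unsigned plen, unsigned *first, unsigned *last,
			 unsigned *firsti)
{
	*last = PIOBCNT2K + PIOBCNT4K;
	if (plen + 1 >= SMALLBUF_DWORDS) {
		*first = PIOBCNT2K;
		*firsti = lastpioindexl;
	} else {
		*first = 0;
		*firsti = lastpioindex;
	}
}

int main(void)
{
	unsigned first, last, firsti;

	select_range(100, &first, &last, &firsti);  /* small packet */
	printf("small: scan [%u,%u) from %u\n", first, last, firsti);
	select_range(600, &first, &last, &firsti);  /* needs a 4K buffer */
	printf("large: scan [%u,%u) from %u\n", first, last, firsti);
	return 0;
}

The diag and verbs changes elsewhere in this commit exist mainly to feed
that plen argument through to the allocator.
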
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index cddf29b9554e5b4b0057fc771eca75a0aa6a1654..1b232b23dbfc982edd6748949419dd14668a2c1f 100644
@@ -1603,6 +1603,9 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
                port_fp(fp) = pd;
                pd->port_pid = current->pid;
                strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
+               ipath_chg_pioavailkernel(dd,
+                       dd->ipath_pbufsport * (pd->port_port - 1),
+                       dd->ipath_pbufsport, 0);
                ipath_stats.sps_ports++;
                ret = 0;
        } else
@@ -2081,6 +2084,7 @@ static int ipath_close(struct inode *in, struct file *fp)
 
                i = dd->ipath_pbufsport * (port - 1);
                ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
+               ipath_chg_pioavailkernel(dd, i, dd->ipath_pbufsport, 1);
 
                dd->ipath_f_clear_tids(dd, pd->port_port);
 
@@ -2145,21 +2149,6 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
        return ret;
 }
 
-static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-               dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-       return 0;
-}
-
 static ssize_t ipath_write(struct file *fp, const char __user *data,
                           size_t count, loff_t *off)
 {
@@ -2304,7 +2293,7 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
                                           cmd.cmd.slave_mask_addr);
                break;
        case IPATH_CMD_PIOAVAILUPD:
-               ret = ipath_force_pio_avail_update(pd->port_dd);
+               ipath_force_pio_avail_update(pd->port_dd);
                break;
        case IPATH_CMD_POLL_TYPE:
                pd->poll_type = cmd.cmd.poll_type;
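
These file_ops hooks are the ownership handoff itself: try_alloc_port()
pulls a port's slice of send buffers out of kernel use, and
ipath_close() gives the slice back.  A small sketch of the slice
arithmetic both call sites share (PBUFSPORT is a hypothetical stand-in
for dd->ipath_pbufsport):

#include <stdio.h>

#define PBUFSPORT 32  /* illustrative buffers reserved per user port */

int main(void)
{
	unsigned port;

	/* User port N (1-based) owns buffers [PBUFSPORT * (N - 1),
	 * PBUFSPORT * N).  On open the slice leaves kernel use
	 * (avail = 0); on close it is handed back (avail = 1). */
	for (port = 1; port <= 3; port++)
		printf("port %u: bufs %u..%u\n", port,
		       PBUFSPORT * (port - 1), PBUFSPORT * port - 1);
	return 0;
}
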
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 46c70656d538c6098062ef028464feadb038e7af..786a5e017fddf9a515c87a1fe7491bd5a9b196d4 100644
@@ -521,7 +521,9 @@ static void enable_chip(struct ipath_devdata *dd,
                        pioavail = dd->ipath_pioavailregs_dma[i ^ 1];
                else
                        pioavail = dd->ipath_pioavailregs_dma[i];
-               dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail);
+               dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail) |
+                       (~dd->ipath_pioavailkernel[i] <<
+                       INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
        }
        /* can get counters, stats, etc. */
        dd->ipath_flags |= IPATH_PRESENT;
@@ -743,7 +745,9 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
                ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n",
                          dd->ipath_pbufsport, val32);
        }
-       dd->ipath_lastpioindex = dd->ipath_lastport_piobuf;
+       dd->ipath_lastpioindex = 0;
+       dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;
+       ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
        ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
                   "each for %u user ports\n", kpiobufs,
                   piobufs, dd->ipath_pbufsport, uports);
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 5608e3268a62287bb89eeae3f6250984a08357db..d1e13a46093d10360b4dac2b0a9af653373940c7 100644
@@ -804,7 +804,6 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 {
        int i, im;
        u64 val;
-       unsigned long flags;
 
        /* disable error interrupts, to avoid confusion */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
@@ -823,14 +822,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
                         dd->ipath_control);
 
        /* ensure pio avail updates continue */
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                        dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+       ipath_force_pio_avail_update(dd);
 
        /*
         * We just enabled pioavailupdate, so dma copy is almost certainly
@@ -842,7 +834,9 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
                        i ^ 1 : i;
                val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
                dd->ipath_pioavailregs_dma[i] = cpu_to_le64(val);
-               dd->ipath_pioavailshadow[i] = val;
+               dd->ipath_pioavailshadow[i] = val |
+                       (~dd->ipath_pioavailkernel[i] <<
+                       INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
        }
 
        /*
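
After a freeze (and again in enable_chip() above), the shadow is rebuilt
from the hardware's in-memory copy, OR-ing in
~ipath_pioavailkernel[i] << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT so that
user-owned buffers come back still marked busy to the kernel.  A sketch
of that bit arithmetic with hand-picked values:

#include <stdio.h>

#define BUSY_SHIFT 1
#define NBUFS      8

int main(void)
{
	/* 2 bits per buffer: generation at 2*i, busy at 2*i+1.  The
	 * hardware reports every buffer free, but buffers 2 and 3
	 * belong to a user port, so their kernelmask bits (even
	 * positions 4 and 6) are clear. */
	unsigned long hw_avail   = 0;
	unsigned long kernelmask = 0x5505;  /* 0x5555 minus bits 4 and 6 */
	unsigned long shadow;
	unsigned i;

	/* Mirror of the shadow rebuild: non-kernel buffers are forced
	 * busy regardless of the hardware copy.  (Stray bits land on
	 * generation positions, as in the driver, but only the busy
	 * bits gate allocation.) */
	shadow = hw_avail | (~kernelmask << BUSY_SHIFT);

	for (i = 0; i < NBUFS; i++)
		printf("buf %u: %s\n", i,
		       (shadow & (1UL << (2 * i + BUSY_SHIFT))) ?
		       "busy (user-owned)" : "free");
	return 0;
}
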
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 2510359b1c836520f9e045ce777668232c33b57a..3a15af26b093f2b20e6e216934d7b945714734fd 100644
@@ -191,6 +191,9 @@ struct ipath_skbinfo {
        dma_addr_t phys;
 };
 
+/* max dwords in small buffer packet */
+#define IPATH_SMALLBUF_DWORDS (dd->ipath_piosize2k >> 2)
+
 /*
  * Possible IB config parameters for ipath_f_get/set_ib_cfg()
  */
@@ -366,6 +369,7 @@ struct ipath_devdata {
         * get to multiple devices
         */
        u32 ipath_lastpioindex;
+       u32 ipath_lastpioindexl;
        /* max length of freezemsg */
        u32 ipath_freezelen;
        /*
@@ -453,6 +457,8 @@ struct ipath_devdata {
         * init time.
         */
        unsigned long ipath_pioavailshadow[8];
+       /* bitmap of send buffers available for the kernel to use with PIO. */
+       unsigned long ipath_pioavailkernel[8];
        /* shadow of kr_gpio_out, for rmw ops */
        u64 ipath_gpio_out;
        /* shadow the gpio mask register */
@@ -869,13 +875,16 @@ void ipath_hol_event(unsigned long);
 
 /* free up any allocated data at closes */
 void ipath_free_data(struct ipath_portdata *dd);
-u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
+u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
+void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
+                               unsigned len, int avail);
 void ipath_init_iba6120_funcs(struct ipath_devdata *);
 void ipath_init_iba6110_funcs(struct ipath_devdata *);
 void ipath_get_eeprom_info(struct ipath_devdata *);
 int ipath_update_eeprom_log(struct ipath_devdata *dd);
 void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
 u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
+void ipath_force_pio_avail_update(struct ipath_devdata *);
 void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
 
 /*
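
One caveat worth noting: IPATH_SMALLBUF_DWORDS expands to an expression
that references dd, so it only compiles inside functions that have a
struct ipath_devdata pointer named dd in scope (as ipath_getpiobuf()
does).  A toy illustration of the pattern, with a reduced struct
standing in for the real one:

#include <stdio.h>

struct devdata { unsigned piosize2k; };

/* Like IPATH_SMALLBUF_DWORDS, this quietly captures a variable named
 * dd from the enclosing scope. */
#define SMALLBUF_DWORDS (dd->piosize2k >> 2)

int main(void)
{
	struct devdata d = { .piosize2k = 2048 };
	struct devdata *dd = &d;

	printf("a small buffer holds %u dwords\n", SMALLBUF_DWORDS);
	return 0;
}
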
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index 16d0d74f1dda576a44117463ed87cb34466bee84..61e562148496521d19911785128d5d81c4320371 100644
@@ -66,6 +66,8 @@
 
 /* kr_sendctrl bits */
 #define INFINIPATH_S_DISARMPIOBUF_SHIFT 16
+#define INFINIPATH_S_UPDTHRESH_SHIFT 24
+#define INFINIPATH_S_UPDTHRESH_MASK 0x1f
 
 #define IPATH_S_ABORT          0
 #define IPATH_S_PIOINTBUFAVAIL 1
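
The new UPDTHRESH fields describe a 5-bit update threshold in
kr_sendctrl (bits 24..28 on chips that support it);
ipath_getpiobuf_range() extracts it to decide whether forcing a pioavail
update and scanning one more time is worthwhile.  A minimal
demonstration of the field extraction, with a hypothetical sendctrl
value:

#include <stdio.h>

#define INFINIPATH_S_UPDTHRESH_SHIFT 24
#define INFINIPATH_S_UPDTHRESH_MASK  0x1f

int main(void)
{
	/* Hypothetical sendctrl with an update threshold of 10 encoded. */
	unsigned long long sendctrl = 10ULL << INFINIPATH_S_UPDTHRESH_SHIFT;

	/* Same extraction as the rescan path in ipath_getpiobuf_range():
	 * a range no bigger than this is worth one forced update and
	 * rescan before giving up. */
	unsigned thresh = (sendctrl >> INFINIPATH_S_UPDTHRESH_SHIFT) &
			  INFINIPATH_S_UPDTHRESH_MASK;

	printf("update threshold: %u buffers\n", thresh);
	return 0;
}
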
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 2f9bc29313afde8e89501d22ea1f8199a0fa048c..2e6b6f6265b63073c0090214d7b9d075152db1d1 100644
@@ -875,7 +875,7 @@ static int ipath_verbs_send_pio(struct ipath_qp *qp, u32 *hdr, u32 hdrwords,
        unsigned flush_wc;
        int ret;
 
-       piobuf = ipath_getpiobuf(dd, NULL);
+       piobuf = ipath_getpiobuf(dd, plen, NULL);
        if (unlikely(piobuf == NULL)) {
                ret = -EBUSY;
                goto bail;