ehea: Fix DLPAR memory add support
author     Thomas Klein <osstklei@de.ibm.com>
           Fri, 4 Apr 2008 13:04:53 +0000 (15:04 +0200)
committer  Jeff Garzik <jgarzik@redhat.com>
           Sat, 12 Apr 2008 08:47:50 +0000 (04:47 -0400)
This patch fixes two weaknesses in send/receive packet handling that may
lead to kernel panics during DLPAR memory add operations.

Signed-off-by: Thomas Klein <tklein@de.ibm.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
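
One of the two fixes adds ehea_flush_sq() (see the ehea_main.c diff below), which polls swqe_avail until the send queue has drained before the queue pairs are stopped for memory re-registration, giving up after a bounded number of retries. What follows is a minimal standalone sketch of that bounded drain-wait pattern, not the driver code itself: the struct and function names are illustrative only, and C11 atomics plus usleep() stand in for the kernel's atomic_read() and msleep().

/*
 * Simplified sketch of the drain-wait pattern used by ehea_flush_sq():
 * poll a free-entry counter until the send queue has drained, but cap
 * the wait so a stuck queue cannot block the DLPAR operation forever.
 * All names below are illustrative, not taken from the ehea driver.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct send_queue {
	atomic_int wqes_available;	/* free send WQEs (completions raise this) */
	int size;			/* total number of send WQEs */
	int in_flight_ll;		/* low-latency WQEs not tracked by completions */
};

/* Wait (bounded) until all tracked send WQEs have completed. */
static void flush_send_queue(struct send_queue *sq)
{
	int drained = sq->size - 2 - sq->in_flight_ll;
	int tries = 0;

	while (atomic_load(&sq->wqes_available) < drained) {
		usleep(5000);		/* give pending transmits time to finish */
		if (++tries == 20)	/* cap the wait at ~100 ms, as the patch does */
			break;
	}
}

int main(void)
{
	struct send_queue sq = { .size = 128, .in_flight_ll = 0 };

	atomic_init(&sq.wqes_available, 126);	/* queue already drained */
	flush_send_queue(&sq);
	printf("send queue drained; safe to stop QPs and re-register memory\n");
	return 0;
}

In the patch itself, ehea_flush_sq() is called from ehea_rereg_mrs() between netif_stop_queue() and ehea_stop_qps(), so no new packets can be queued while the drain wait runs.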
drivers/net/ehea/ehea.h
drivers/net/ehea/ehea_main.c

diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 93b7fb2469604d55c338c0ba0bf7da4f9cd520e4..a8d3280923e85483f386ecccee4908af2f8e87d8 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME       "ehea"
-#define DRV_VERSION    "EHEA_0089"
+#define DRV_VERSION    "EHEA_0090"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
@@ -371,6 +371,7 @@ struct ehea_port_res {
        struct ehea_q_skb_arr rq2_skba;
        struct ehea_q_skb_arr rq3_skba;
        struct ehea_q_skb_arr sq_skba;
+       int sq_skba_size;
        spinlock_t netif_queue;
        int queue_stopped;
        int swqe_refill_th;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 07c742dd3f0947131507f8e2a1c6251f4e2300c9..f460b623c0771356acca11780e06b62efac2e0c4 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -349,7 +349,8 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
        pr->rq1_skba.os_skbs = 0;
 
        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
-               pr->rq1_skba.index = index;
+               if (nr_of_wqes > 0)
+                       pr->rq1_skba.index = index;
                pr->rq1_skba.os_skbs = fill_wqes;
                return;
        }
@@ -1464,7 +1465,9 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
                          init_attr->act_nr_rwqes_rq2,
                          init_attr->act_nr_rwqes_rq3);
 
-       ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
+       pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
+
+       ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
        ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
        ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
        ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
@@ -2621,6 +2624,22 @@ void ehea_purge_sq(struct ehea_qp *orig_qp)
        }
 }
 
+void ehea_flush_sq(struct ehea_port *port)
+{
+       int i;
+
+       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+               struct ehea_port_res *pr = &port->port_res[i];
+               int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
+               int k = 0;
+               while (atomic_read(&pr->swqe_avail) < swqe_max) {
+                       msleep(5);
+                       if (++k == 20)
+                               break;
+               }
+       }
+}
+
 int ehea_stop_qps(struct net_device *dev)
 {
        struct ehea_port *port = netdev_priv(dev);
@@ -2845,6 +2864,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
                                        if (dev->flags & IFF_UP) {
                                                down(&port->port_lock);
                                                netif_stop_queue(dev);
+                                               ehea_flush_sq(port);
                                                ret = ehea_stop_qps(dev);
                                                if (ret) {
                                                        up(&port->port_lock);