ocfs2: allocate lockres hash pages in an array
author    Daniel Phillips <phillips@google.com>
          Sat, 11 Mar 2006 02:08:16 +0000 (18:08 -0800)
committer Mark Fasheh <mark.fasheh@oracle.com>
          Mon, 26 Jun 2006 21:42:42 +0000 (14:42 -0700)
This allows us to use a hash table larger than a single page, which greatly
improves dlm performance on some tests. With the defines below the table grows
from one page to four; on a 64-bit build with 4 KiB pages that is 2048 buckets
instead of 512.

Signed-off-by: Daniel Phillips <phillips@google.com>
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmdebug.c
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmrecovery.c

diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 87612819c13b180479ac7f78a23d654c477456ad..0378ddbc8a8cc8f6f652be9a7caf79683294f617 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
 #define DLM_THREAD_SHUFFLE_INTERVAL    5     // flush everything every 5 passes
 #define DLM_THREAD_MS                  200   // flush at least every 200 ms
 
-#define DLM_HASH_BUCKETS     (PAGE_SIZE / sizeof(struct hlist_head))
+#define DLM_HASH_SIZE          (1 << 14)
+#define DLM_HASH_PAGES         (DLM_HASH_SIZE / PAGE_SIZE)
+#define DLM_BUCKETS_PER_PAGE   (PAGE_SIZE / sizeof(struct hlist_head))
+#define DLM_HASH_BUCKETS       (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)
 
 /* Intended to make it easier for us to switch out hash functions */
 #define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)
@@ -88,7 +91,7 @@ enum dlm_ctxt_state {
 struct dlm_ctxt
 {
        struct list_head list;
-       struct hlist_head *lockres_hash;
+       struct hlist_head **lockres_hash;
        struct list_head dirty_list;
        struct list_head purge_list;
        struct list_head pending_asts;
@@ -135,6 +138,11 @@ struct dlm_ctxt
        struct list_head        dlm_eviction_callbacks;
 };
 
+static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
+{
+       return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE);
+}
+
 /* these keventd work queue items are for less-frequently
  * called functions that cannot be directly called from the
  * net message handlers for some reason, usually because
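[Editorial note on the helper above: the page/offset split picks the same
bucket a flat "i % DLM_HASH_BUCKETS" index would, because the bucket count is
an exact multiple of the per-page count. A minimal user-space sketch of that
equivalence, not part of the commit, assuming 4 KiB pages and an 8-byte
struct hlist_head (i.e. a 64-bit build):]

    #include <assert.h>
    #include <stdio.h>

    /* Stand-ins for the kernel defines under the stated assumptions. */
    #define PAGE_SIZE            4096
    #define HLIST_HEAD_SIZE      8    /* sizeof(struct hlist_head), 64-bit */
    #define DLM_HASH_SIZE        (1 << 14)
    #define DLM_HASH_PAGES       (DLM_HASH_SIZE / PAGE_SIZE)             /* 4 */
    #define DLM_BUCKETS_PER_PAGE (PAGE_SIZE / HLIST_HEAD_SIZE)           /* 512 */
    #define DLM_HASH_BUCKETS     (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE) /* 2048 */

    int main(void)
    {
            unsigned i;

            /* The page index and in-page offset computed the way
             * dlm_lockres_hash() computes them select the same bucket
             * as a flat "i % DLM_HASH_BUCKETS" lookup. */
            for (i = 0; i < (1u << 20); i++) {
                    unsigned page   = (i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES;
                    unsigned offset = i % DLM_BUCKETS_PER_PAGE;

                    assert(page * DLM_BUCKETS_PER_PAGE + offset ==
                           i % DLM_HASH_BUCKETS);
            }
            printf("%d pages x %d buckets/page = %d buckets\n",
                   DLM_HASH_PAGES, DLM_BUCKETS_PER_PAGE, DLM_HASH_BUCKETS);
            return 0;
    }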
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index c7eae5d3324ea833192f74aefc2a69e5dd5db72a..e119e4ddf93f73b3d7c373f3aa44de4206a31474 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -136,7 +136,7 @@ void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
 
        spin_lock(&dlm->spinlock);
        for (i=0; i<DLM_HASH_BUCKETS; i++) {
-               bucket = &(dlm->lockres_hash[i]);
+               bucket = dlm_lockres_hash(dlm, i);
                hlist_for_each_entry(res, iter, bucket, hash_node)
                        dlm_print_one_lock_resource(res);
        }
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 595b1c7cbd1fdef69f47e33ee1c645c0c85d8dff..80b8cce9cf3c7ddd90fca506f67067187a216006 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
 #include "cluster/masklog.h"
 
+static void dlm_free_pagevec(void **vec, int pages)
+{
+       while (pages--)
+               free_page((unsigned long)vec[pages]);
+       kfree(vec);
+}
+
+static void **dlm_alloc_pagevec(int pages)
+{
+       void **vec = kmalloc(pages * sizeof(void *), GFP_KERNEL);
+       int i;
+
+       if (!vec)
+               return NULL;
+
+       for (i = 0; i < pages; i++)
+               if (!(vec[i] = (void *)__get_free_page(GFP_KERNEL)))
+                       goto out_free;
+       return vec;
+out_free:
+       dlm_free_pagevec(vec, i);
+       return NULL;
+}
+
 /*
  *
  * spinlock lock ordering: if multiple locks are needed, obey this ordering:
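[Editorial note on dlm_alloc_pagevec() above: on partial failure it unwinds by
calling dlm_free_pagevec(vec, i) with the count of pages already obtained, so
the error path never touches unassigned slots. A user-space analogue of the
same pattern, with malloc/free standing in for kmalloc/kfree and
__get_free_page/free_page:]

    #include <stdio.h>
    #include <stdlib.h>

    static void free_pagevec(void **vec, int pages)
    {
            /* Release the pages in reverse order, then the vector itself. */
            while (pages--)
                    free(vec[pages]);
            free(vec);
    }

    static void **alloc_pagevec(int pages, size_t page_size)
    {
            void **vec = malloc(pages * sizeof(void *));
            int i;

            if (!vec)
                    return NULL;

            for (i = 0; i < pages; i++)
                    if (!(vec[i] = malloc(page_size)))
                            goto out_free;
            return vec;
    out_free:
            free_pagevec(vec, i);   /* only the i pages obtained so far */
            return NULL;
    }

    int main(void)
    {
            void **vec = alloc_pagevec(4, 4096);

            if (!vec)
                    return 1;
            printf("allocated 4 pages, first at %p\n", vec[0]);
            free_pagevec(vec, 4);
            return 0;
    }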
@@ -90,7 +114,7 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm,
        assert_spin_locked(&dlm->spinlock);
 
        q = &res->lockname;
-       bucket = &(dlm->lockres_hash[q->hash % DLM_HASH_BUCKETS]);
+       bucket = dlm_lockres_hash(dlm, q->hash);
 
        /* get a reference for our hashtable */
        dlm_lockres_get(res);
@@ -110,7 +134,8 @@ struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
 
        assert_spin_locked(&dlm->spinlock);
 
-       bucket = dlm->lockres_hash + full_name_hash(name, len) % DLM_HASH_BUCKETS;
+       bucket = dlm_lockres_hash(dlm, hash);
+
        hlist_for_each(list, bucket) {
                struct dlm_lock_resource *res = hlist_entry(list,
                        struct dlm_lock_resource, hash_node);
@@ -191,7 +216,7 @@ static int dlm_wait_on_domain_helper(const char *domain)
 static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
 {
        if (dlm->lockres_hash)
-               free_page((unsigned long) dlm->lockres_hash);
+               dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
 
        if (dlm->name)
                kfree(dlm->name);
@@ -301,8 +326,8 @@ static void dlm_migrate_all_locks(struct dlm_ctxt *dlm)
 restart:
        spin_lock(&dlm->spinlock);
        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
-               while (!hlist_empty(&dlm->lockres_hash[i])) {
-                       res = hlist_entry(dlm->lockres_hash[i].first,
+               while (!hlist_empty(dlm_lockres_hash(dlm, i))) {
+                       res = hlist_entry(dlm_lockres_hash(dlm, i)->first,
                                          struct dlm_lock_resource, hash_node);
                        /* need reference when manually grabbing lockres */
                        dlm_lockres_get(res);
@@ -1188,7 +1213,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
                goto leave;
        }
 
-       dlm->lockres_hash = (struct hlist_head *) __get_free_page(GFP_KERNEL);
+       dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES);
        if (!dlm->lockres_hash) {
                mlog_errno(-ENOMEM);
                kfree(dlm->name);
@@ -1197,8 +1222,8 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
                goto leave;
        }
 
-       for (i=0; i<DLM_HASH_BUCKETS; i++)
-               INIT_HLIST_HEAD(&dlm->lockres_hash[i]);
+       for (i = 0; i < DLM_HASH_BUCKETS; i++)
+               INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i));
 
        strcpy(dlm->name, domain);
        dlm->key = key;
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 4f3d482a7299b92dea7698aeb1879c0cfb854625..bf2a02eca62eca13964b40065f2d002d8c18ba7c 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1719,7 +1719,7 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
         * the RECOVERING state and set the owner
         * if necessary */
        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
-               bucket = &(dlm->lockres_hash[i]);
+               bucket = dlm_lockres_hash(dlm, i);
                hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
                        if (res->state & DLM_LOCK_RES_RECOVERING) {
                                if (res->owner == dead_node) {
@@ -1884,7 +1884,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
         *    need to be fired as a result.
         */
        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
-               bucket = &(dlm->lockres_hash[i]);
+               bucket = dlm_lockres_hash(dlm, i);
                hlist_for_each_entry(res, iter, bucket, hash_node) {
                        /* always prune any $RECOVERY entries for dead nodes,
                         * otherwise hangs can occur during later recovery */