static spinlock_t ast_queue_lock;
static struct task_struct * astd_task;
static unsigned long astd_wakeflags;
-static struct semaphore astd_running;
+static struct mutex astd_running;
void dlm_del_ast(struct dlm_lkb *lkb)
int type = 0, found, bmode;
for (;;) {
- found = FALSE;
+ found = 0;
spin_lock(&ast_queue_lock);
list_for_each_entry(lkb, &ast_queue, lkb_astqueue) {
r = lkb->lkb_resource;
list_del(&lkb->lkb_astqueue);
type = lkb->lkb_ast_type;
lkb->lkb_ast_type = 0;
- found = TRUE;
+ found = 1;
break;
}
spin_unlock(&ast_queue_lock);
schedule();
set_current_state(TASK_RUNNING);
- down(&astd_running);
+ mutex_lock(&astd_running);
if (test_and_clear_bit(WAKE_ASTS, &astd_wakeflags))
process_asts();
- up(&astd_running);
+ mutex_unlock(&astd_running);
}
return 0;
}
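For context, the loop above is the canonical kthread sleep/wake skeleton; the elided lines presumably set TASK_INTERRUPTIBLE and test the wake bit before the schedule() shown at the top of the iteration, which is what makes the wakeup race-free. A self-contained sketch of the shape (all names illustrative, not part of the patch):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/bitops.h>

#define WAKE_WORK 0			/* illustrative wake bit */
static unsigned long wakeflags;

static int worker_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* Mark ourselves sleeping *before* testing the bit, so a
		 * waker that sets the bit and calls wake_up_process()
		 * cannot be missed. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!test_bit(WAKE_WORK, &wakeflags))
			schedule();	/* sleep until woken */
		set_current_state(TASK_RUNNING);

		if (test_and_clear_bit(WAKE_WORK, &wakeflags))
			;		/* process_work() would go here */
	}
	return 0;
}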
INIT_LIST_HEAD(&ast_queue);
spin_lock_init(&ast_queue_lock);
- init_MUTEX(&astd_running);
+ mutex_init(&astd_running);
p = kthread_run(dlm_astd, NULL, "dlm_astd");
if (IS_ERR(p))
void dlm_astd_suspend(void)
{
- down(&astd_running);
+ mutex_lock(&astd_running);
}
void dlm_astd_resume(void)
{
- up(&astd_running);
+ mutex_unlock(&astd_running);
}
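dlm_astd_suspend()/dlm_astd_resume() work because the daemon takes astd_running around every process_asts() call: whoever holds the mutex keeps AST delivery parked. One caveat of the conversion is that, unlike a semaphore, a mutex must be released by the task that acquired it; that holds here because the same recovery thread calls the suspend/resume pair. The gate in miniature (illustrative names):

#include <linux/mutex.h>

static DEFINE_MUTEX(gate);

/* Daemon side: work proceeds only while the gate can be taken. */
static void daemon_iteration(void)
{
	mutex_lock(&gate);
	/* deliver_asts(); */
	mutex_unlock(&gate);
}

/* Control side: holding the gate parks the daemon. */
static void suspend(void) { mutex_lock(&gate); }
static void resume(void)  { mutex_unlock(&gate); }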
struct space {
struct config_group group;
struct list_head members;
- struct semaphore members_lock;
+ struct mutex members_lock;
int members_count;
};
sp->group.default_groups[1] = NULL;
INIT_LIST_HEAD(&sp->members);
- init_MUTEX(&sp->members_lock);
+ mutex_init(&sp->members_lock);
sp->members_count = 0;
return &sp->group;
nd->nodeid = -1;
nd->weight = 1; /* default weight of 1 if none is set */
- down(&sp->members_lock);
+ mutex_lock(&sp->members_lock);
list_add(&nd->list, &sp->members);
sp->members_count++;
- up(&sp->members_lock);
+ mutex_unlock(&sp->members_lock);
return &nd->item;
}
struct space *sp = to_space(g->cg_item.ci_parent);
struct node *nd = to_node(i);
- down(&sp->members_lock);
+ mutex_lock(&sp->members_lock);
list_del(&nd->list);
sp->members_count--;
- up(&sp->members_lock);
+ mutex_unlock(&sp->members_lock);
config_item_put(i);
}
if (!sp)
return -EEXIST;
- down(&sp->members_lock);
+ mutex_lock(&sp->members_lock);
if (!sp->members_count) {
rv = 0;
goto out;
*ids_out = ids;
out:
- up(&sp->members_lock);
+ mutex_unlock(&sp->members_lock);
put_space(sp);
return rv;
}
if (!sp)
goto out;
- down(&sp->members_lock);
+ mutex_lock(&sp->members_lock);
list_for_each_entry(nd, &sp->members, list) {
if (nd->nodeid != nodeid)
continue;
w = nd->weight;
break;
}
- up(&sp->members_lock);
+ mutex_unlock(&sp->members_lock);
put_space(sp);
out:
return w;
- /* FIXME: this warns on Alpha */
if (lkb->lkb_status == DLM_LKSTS_CONVERT
|| lkb->lkb_status == DLM_LKSTS_GRANTED)
- seq_printf(s, " %" PRIx64 "-%" PRIx64,
- lkb->lkb_range[GR_RANGE_START],
- lkb->lkb_range[GR_RANGE_END]);
+ seq_printf(s, " %llx-%llx",
+ (unsigned long long) lkb->lkb_range[GR_RANGE_START],
+ (unsigned long long) lkb->lkb_range[GR_RANGE_END]);
if (lkb->lkb_status == DLM_LKSTS_CONVERT
|| lkb->lkb_status == DLM_LKSTS_WAITING)
- seq_printf(s, " (%" PRIx64 "-%" PRIx64 ")",
- lkb->lkb_range[RQ_RANGE_START],
- lkb->lkb_range[RQ_RANGE_END]);
+ seq_printf(s, " (%llx-%llx)",
+ (unsigned long long) lkb->lkb_range[RQ_RANGE_START],
+ (unsigned long long) lkb->lkb_range[RQ_RANGE_END]);
}
static struct file_operations _dlm_fops;
static const char *name_prefix="dlm";
static struct list_head user_ls_list;
-static struct semaphore user_ls_lock;
+static struct mutex user_ls_lock;
/* Lock infos are stored in here indexed by lock ID */
static DEFINE_IDR(lockinfo_idr);
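Lock IDs are handed out and resolved through lockinfo_idr. A minimal sketch of the IDR round-trip using today's idr_alloc() interface (the patch-era kernel used the older idr_pre_get()/idr_get_new() pair, so treat this as illustrative rather than what the surrounding code calls):

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

static DEFINE_IDR(example_idr);

static int example(void *obj)
{
	int id;

	/* Allocate an id >= 1 and bind it to obj (end = 0: no upper bound). */
	id = idr_alloc(&example_idr, obj, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	WARN_ON(idr_find(&example_idr, id) != obj);	/* lookup by id */
	idr_remove(&example_idr, id);			/* unbind */
	return 0;
}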
#define LI_FLAG_COMPLETE 1
#define LI_FLAG_FIRSTLOCK 2
#define LI_FLAG_PERSISTENT 3
+#define LI_FLAG_ONLIST 4
/* flags in ls_flags*/
#define LS_FLAG_DELETED 1
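LI_FLAG_ONLIST is the one genuinely new flag in this hunk: it records, under fi_li_lock, whether the lock_info currently sits on its owner's queue, so later paths can tell whether dlm_lock() has finished queueing it. The list-plus-flag pattern the later hunks apply, in isolation (illustrative types and names):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#define ONLIST_FLAG 4			/* bit number, as with LI_FLAG_ONLIST */

struct obj {
	struct list_head entry;
	unsigned long flags;
};

static LIST_HEAD(owner_list);
static DEFINE_SPINLOCK(owner_lock);

static void attach(struct obj *o)
{
	spin_lock(&owner_lock);
	list_add(&o->entry, &owner_list);
	set_bit(ONLIST_FLAG, &o->flags);	/* on-list state tracks the list */
	spin_unlock(&owner_lock);
}

static void detach(struct obj *o)
{
	spin_lock(&owner_lock);
	list_del(&o->entry);
	clear_bit(ONLIST_FLAG, &o->flags);
	spin_unlock(&owner_lock);
}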
{
struct user_ls *lsinfo;
- down(&user_ls_lock);
+ mutex_lock(&user_ls_lock);
lsinfo = __find_lockspace(minor);
- up(&user_ls_lock);
+ mutex_unlock(&user_ls_lock);
return lsinfo;
}
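find_lockspace() keeps to the usual kernel naming convention: the double-underscore __find_lockspace() assumes the caller already holds user_ls_lock, while the undecorated wrapper takes and drops the mutex itself. The shape, sketched with illustrative names:

#include <linux/list.h>
#include <linux/mutex.h>

struct item {
	int key;
	struct list_head list;
};

static LIST_HEAD(items);
static DEFINE_MUTEX(items_lock);

/* Caller must hold items_lock. */
static struct item *__find_item(int key)
{
	struct item *it;

	list_for_each_entry(it, &items, list)
		if (it->key == key)
			return it;
	return NULL;
}

static struct item *find_item(int key)
{
	struct item *it;

	mutex_lock(&items_lock);
	it = __find_item(key);
	mutex_unlock(&items_lock);
	return it;
}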
static void add_lockspace_to_list(struct user_ls *lsinfo)
{
- down(&user_ls_lock);
+ mutex_lock(&user_ls_lock);
list_add(&lsinfo->ls_list, &user_ls_list);
- up(&user_ls_lock);
+ mutex_unlock(&user_ls_lock);
}
/* Register a lockspace with the DLM and create a misc
namelen = strlen(name)+strlen(name_prefix)+2;
- newls = kmalloc(sizeof(struct user_ls), GFP_KERNEL);
+ newls = kzalloc(sizeof(struct user_ls), GFP_KERNEL);
if (!newls)
return -ENOMEM;
- memset(newls, 0, sizeof(struct user_ls));
- newls->ls_miscinfo.name = kmalloc(namelen, GFP_KERNEL);
+ newls->ls_miscinfo.name = kzalloc(namelen, GFP_KERNEL);
if (!newls->ls_miscinfo.name) {
kfree(newls);
return -ENOMEM;
return 0;
}
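Every kmalloc()+memset(0) pair in this patch collapses into a single kzalloc() call; besides being shorter, it makes it impossible to forget the memset or to pass it a mismatched size. The transformation in isolation (hypothetical struct foo):

#include <linux/slab.h>
#include <linux/string.h>

struct foo {
	int a;
	char name[16];
};

/* Before: allocate, then zero in a second step. */
static struct foo *alloc_foo_old(void)
{
	struct foo *f = kmalloc(sizeof(struct foo), GFP_KERNEL);

	if (!f)
		return NULL;
	memset(f, 0, sizeof(struct foo));
	return f;
}

/* After: one call, returned memory already zeroed. */
static struct foo *alloc_foo_new(void)
{
	return kzalloc(sizeof(struct foo), GFP_KERNEL);
}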
-/* Called with the user_ls_lock semaphore held */
+/* Called with the user_ls_lock mutex held */
static int unregister_lockspace(struct user_ls *lsinfo, int force)
{
int status;
static void add_to_astqueue(struct lock_info *li, void *astaddr, void *astparam,
int lvb_updated)
{
- struct ast_info *ast = kmalloc(sizeof(struct ast_info), GFP_KERNEL);
+ struct ast_info *ast = kzalloc(sizeof(struct ast_info), GFP_KERNEL);
if (!ast)
return;
- memset(ast, 0, sizeof(*ast));
ast->result.user_astparam = astparam;
ast->result.user_astaddr = astaddr;
ast->result.user_lksb = li->li_user_lksb;
spin_lock(&li->li_file->fi_li_lock);
list_del(&li->li_ownerqueue);
+ clear_bit(LI_FLAG_ONLIST, &li->li_flags);
spin_unlock(&li->li_file->fi_li_lock);
release_lockinfo(li);
return;
if (!lsinfo)
return -ENOENT;
- f = kmalloc(sizeof(struct file_info), GFP_KERNEL);
+ f = kzalloc(sizeof(struct file_info), GFP_KERNEL);
if (!f)
return -ENOMEM;
* then free the struct. If it's an AUTOFREE lockspace
* then free the whole thing.
*/
- down(&user_ls_lock);
+ mutex_lock(&user_ls_lock);
if (atomic_dec_and_test(&lsinfo->ls_refcnt)) {
if (lsinfo->ls_lockspace) {
kfree(lsinfo);
}
}
- up(&user_ls_lock);
+ mutex_unlock(&user_ls_lock);
put_file_info(f);
/* Restore signals */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- down(&user_ls_lock);
+ mutex_lock(&user_ls_lock);
lsinfo = __find_lockspace(kparams->minor);
if (!lsinfo) {
- up(&user_ls_lock);
+ mutex_unlock(&user_ls_lock);
return -EINVAL;
}
force = 2;
status = unregister_lockspace(lsinfo, force);
- up(&user_ls_lock);
+ mutex_unlock(&user_ls_lock);
return status;
}
if (!try_module_get(THIS_MODULE))
return NULL;
- li = kmalloc(sizeof(struct lock_info), GFP_KERNEL);
+ li = kzalloc(sizeof(struct lock_info), GFP_KERNEL);
if (li) {
li->li_magic = LOCKINFO_MAGIC;
li->li_file = fi;
/* If this is a persistent lock we will have to create a
lockinfo again */
- if (!li && DLM_LKF_PERSISTENT) {
+ if (!li && (kparams->flags & DLM_LKF_PERSISTENT)) {
li = allocate_lockinfo(fi, cmd, kparams);
+ if (!li)
+ return -ENOMEM;
li->li_lksb.sb_lkid = kparams->lkid;
li->li_castaddr = kparams->castaddr;
spin_lock(&fi->fi_li_lock);
list_add(&li->li_ownerqueue, &fi->fi_li_list);
+ set_bit(LI_FLAG_ONLIST, &li->li_flags);
spin_unlock(&fi->fi_li_lock);
if (add_lockinfo(li))
printk(KERN_WARNING "Add lockinfo failed\n");
li = get_lockinfo(kparams->lkid);
if (!li) {
li = allocate_lockinfo(fi, cmd, kparams);
+ if (!li)
+ return -ENOMEM;
spin_lock(&fi->fi_li_lock);
list_add(&li->li_ownerqueue, &fi->fi_li_list);
+ set_bit(LI_FLAG_ONLIST, &li->li_flags);
spin_unlock(&fi->fi_li_lock);
}
- if (!li)
- return -ENOMEM;
if (li->li_magic != LOCKINFO_MAGIC)
return -EINVAL;
if (kparams->flags & DLM_LKF_CANCEL && li->li_grmode != -1)
convert_cancel = 1;
+ /* Wait until dlm_lock() has completed */
+ if (!test_bit(LI_FLAG_ONLIST, &li->li_flags)) {
+ down(&li->li_firstlock);
+ up(&li->li_firstlock);
+ }
+
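The down()/up() pair on li_firstlock above is a wait-for-completion idiom rather than mutual exclusion: the semaphore is held while the first dlm_lock() call is being set up, so taking and immediately releasing it blocks until that setup has finished. In current kernels a struct completion states the intent directly (sketch with illustrative names; the patch itself keeps the semaphore):

#include <linux/completion.h>

static DECLARE_COMPLETION(first_lock_done);

/* Setup side: signal once the first lock request is fully queued. */
static void setup_finished(void)
{
	complete_all(&first_lock_done);
}

/* Waiter side: replaces the down()/up() pair. */
static void wait_for_setup(void)
{
	wait_for_completion(&first_lock_done);
}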
/* dlm_unlock() passes a 0 for castaddr which means don't overwrite
the existing li_castaddr as that's the completion routine for
unlocks. dlm_unlock_wait() specifies a new AST routine to be
if (!status && !convert_cancel) {
spin_lock(&fi->fi_li_lock);
list_del(&li->li_ownerqueue);
+ clear_bit(LI_FLAG_ONLIST, &li->li_flags);
spin_unlock(&fi->fi_li_lock);
}
int r;
INIT_LIST_HEAD(&user_ls_list);
- init_MUTEX(&user_ls_lock);
+ mutex_init(&user_ls_lock);
rwlock_init(&lockinfo_lock);
ctl_device.name = "dlm-control";
static struct dlm_direntry *get_free_de(struct dlm_ls *ls, int len)
{
- int found = FALSE;
+ int found = 0;
struct dlm_direntry *de;
spin_lock(&ls->ls_recover_list_lock);
list_del(&de->list);
de->master_nodeid = 0;
memset(de->name, 0, len);
- found = TRUE;
+ found = 1;
break;
}
}
#include <linux/kref.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
+#include <linux/mutex.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
#define DLM_LOCKSPACE_LEN 64
-#ifndef TRUE
-#define TRUE 1
-#endif
-
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-#if (BITS_PER_LONG == 64)
-#define PRIx64 "lx"
-#else
-#define PRIx64 "Lx"
-#endif
-
/* Size of the temp buffer midcomms allocates on the stack.
We try to make this large enough so most messages fit.
FIXME: should sctp make this unnecessary? */
struct dlm_rsb {
struct dlm_ls *res_ls; /* the lockspace */
struct kref res_ref;
- struct semaphore res_sem;
+ struct mutex res_mutex;
unsigned long res_flags;
int res_length; /* length of rsb name */
int res_nodeid;
struct dlm_dirtable *ls_dirtbl;
uint32_t ls_dirtbl_size;
- struct semaphore ls_waiters_sem;
+ struct mutex ls_waiters_mutex;
struct list_head ls_waiters; /* lkbs needing a reply */
struct list_head ls_nodes; /* current nodes in ls */
struct timer_list ls_timer;
struct task_struct *ls_recoverd_task;
- struct semaphore ls_recoverd_active;
+ struct mutex ls_recoverd_active;
spinlock_t ls_recover_lock;
uint32_t ls_recover_status; /* DLM_RS_ */
uint64_t ls_recover_seq;
struct dlm_recover *ls_recover_args;
struct rw_semaphore ls_in_recovery; /* block local requests */
struct list_head ls_requestqueue;/* queue remote requests */
- struct semaphore ls_requestqueue_lock;
+ struct mutex ls_requestqueue_mutex;
char *ls_recover_buf;
struct list_head ls_recover_list;
spinlock_t ls_recover_list_lock;
{
if (lkb->lkb_flags & DLM_IFL_MSTCPY)
DLM_ASSERT(lkb->lkb_nodeid, dlm_print_lkb(lkb););
- return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? TRUE : FALSE;
+ return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}
static inline int middle_conversion(struct dlm_lkb *lkb)
{
if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
(lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
- return TRUE;
- return FALSE;
+ return 1;
+ return 0;
}
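middle_conversion() singles out PR<->CW conversions: the two modes are mutually incompatible, yet neither is simply a weaker version of the other, so such a conversion passes through an incompatible state and cannot take the up- or down-conversion fast paths. For reference, the standard VMS-style compatibility matrix that the grant logic is built on (1 = compatible; reproduced here from the DLM literature, not from this patch):

/* Row = currently granted mode, column = requested mode.
 * Mode order: NL, CR, CW, PR, PW, EX.
 */
static const int compat[6][6] = {
	/*         NL CR CW PR PW EX */
	/* NL */ { 1, 1, 1, 1, 1, 1 },
	/* CR */ { 1, 1, 1, 1, 1, 0 },
	/* CW */ { 1, 1, 1, 0, 0, 0 },
	/* PR */ { 1, 1, 0, 1, 0, 0 },
	/* PW */ { 1, 1, 0, 0, 0, 0 },
	/* EX */ { 1, 0, 0, 0, 0, 0 },
};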
static inline int down_conversion(struct dlm_lkb *lkb)
r->res_ls = ls;
r->res_length = len;
memcpy(r->res_name, name, len);
- init_MUTEX(&r->res_sem);
+ mutex_init(&r->res_mutex);
INIT_LIST_HEAD(&r->res_lookup);
INIT_LIST_HEAD(&r->res_grantqueue);
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
- down(&ls->ls_waiters_sem);
+ mutex_lock(&ls->ls_waiters_mutex);
if (lkb->lkb_wait_type) {
log_print("add_to_waiters error %d", lkb->lkb_wait_type);
goto out;
kref_get(&lkb->lkb_ref);
list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
out:
- up(&ls->ls_waiters_sem);
+ mutex_unlock(&ls->ls_waiters_mutex);
}
static int _remove_from_waiters(struct dlm_lkb *lkb)
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int error;
- down(&ls->ls_waiters_sem);
+ mutex_lock(&ls->ls_waiters_mutex);
error = _remove_from_waiters(lkb);
- up(&ls->ls_waiters_sem);
+ mutex_unlock(&ls->ls_waiters_mutex);
return error;
}
int count = 0, found;
for (;;) {
- found = FALSE;
+ found = 0;
write_lock(&ls->ls_rsbtbl[b].lock);
list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
res_hashchain) {
if (!time_after_eq(jiffies, r->res_toss_time +
dlm_config.toss_secs * HZ))
continue;
- found = TRUE;
+ found = 1;
break;
}
struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
lkb_statequeue);
if (lkb->lkb_id == first->lkb_id)
- return TRUE;
+ return 1;
- return FALSE;
+ return 0;
}
/* Return 1 if the locks' ranges overlap. If the lkb has no range then it is
static inline int ranges_overlap(struct dlm_lkb *lkb1, struct dlm_lkb *lkb2)
{
if (!lkb1->lkb_range || !lkb2->lkb_range)
- return TRUE;
+ return 1;
if (lkb1->lkb_range[RQ_RANGE_END] < lkb2->lkb_range[GR_RANGE_START] ||
lkb1->lkb_range[RQ_RANGE_START] > lkb2->lkb_range[GR_RANGE_END])
- return FALSE;
+ return 0;
- return TRUE;
+ return 1;
}
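ranges_overlap() treats a lock without a range as covering the whole resource, then applies the standard interval test: two closed intervals are disjoint exactly when one ends before the other starts. So [0,100] and [50,200] overlap, while [0,100] and [101,200] do not. The bare test, standalone (illustrative):

#include <stdint.h>

/* Closed intervals [s1,e1] and [s2,e2]: disjoint iff one ends
 * before the other begins -- the same comparison used above. */
static int overlap(uint64_t s1, uint64_t e1, uint64_t s2, uint64_t e2)
{
	if (e1 < s2 || s1 > e2)
		return 0;
	return 1;
}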
/* Check if the given lkb conflicts with another lkb on the queue. */
if (this == lkb)
continue;
if (ranges_overlap(lkb, this) && !modes_compat(this, lkb))
- return TRUE;
+ return 1;
}
- return FALSE;
+ return 0;
}
/*
continue;
if (!modes_compat(this, lkb) && !modes_compat(lkb, this))
- return TRUE;
+ return 1;
}
/* if lkb is on the convert queue and is preventing the first
if (self && self != first) {
if (!modes_compat(lkb, first) &&
!queue_conflict(&rsb->res_grantqueue, first))
- return TRUE;
+ return 1;
}
- return FALSE;
+ return 0;
}
/*
*/
if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
- return TRUE;
+ return 1;
/*
* A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
*/
if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
- return TRUE;
+ return 1;
/*
* When using range locks the NOORDER flag is set to avoid the standard
*/
if (lkb->lkb_exflags & DLM_LKF_NOORDER)
- return TRUE;
+ return 1;
/*
* 6-3: Once in that queue [CONVERTING], a conversion request cannot be
*/
if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
- return TRUE;
+ return 1;
/*
* 6-4: By default, a new request is immediately granted only if all
if (now && !conv && list_empty(&r->res_convertqueue) &&
list_empty(&r->res_waitqueue))
- return TRUE;
+ return 1;
/*
* 6-4: Once a lock request is in the queue of ungranted new requests,
if (!now && !conv && list_empty(&r->res_convertqueue) &&
first_in_list(lkb, &r->res_waitqueue))
- return TRUE;
+ return 1;
out:
/*
lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
}
- return FALSE;
+ return 0;
}
/*
list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
demoted = is_demoted(lkb);
- if (can_be_granted(r, lkb, FALSE)) {
+ if (can_be_granted(r, lkb, 0)) {
grant_lock_pending(r, lkb);
grant_restart = 1;
} else {
struct dlm_lkb *lkb, *s;
list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
- if (can_be_granted(r, lkb, FALSE))
+ if (can_be_granted(r, lkb, 0))
grant_lock_pending(r, lkb);
else
high = max_t(int, lkb->lkb_rqmode, high);
{
int error = 0;
- if (can_be_granted(r, lkb, TRUE)) {
+ if (can_be_granted(r, lkb, 1)) {
grant_lock(r, lkb);
queue_cast(r, lkb, 0);
goto out;
/* changing an existing lock may allow others to be granted */
- if (can_be_granted(r, lkb, TRUE)) {
+ if (can_be_granted(r, lkb, 1)) {
grant_lock(r, lkb);
queue_cast(r, lkb, 0);
grant_pending_locks(r);
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
- int error, reply = TRUE;
+ int error, reply = 1;
error = find_lkb(ls, ms->m_remid, &lkb);
if (error)
{
struct dlm_lkb *lkb, *safe;
- down(&ls->ls_waiters_sem);
+ mutex_lock(&ls->ls_waiters_mutex);
list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
log_debug(ls, "pre recover waiter lkid %x type %d flags %x",
lkb->lkb_wait_type);
}
}
- up(&ls->ls_waiters_sem);
+ mutex_unlock(&ls->ls_waiters_mutex);
}
static int remove_resend_waiter(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
struct dlm_lkb *lkb;
int rv = 0;
- down(&ls->ls_waiters_sem);
+ mutex_lock(&ls->ls_waiters_mutex);
list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
if (lkb->lkb_flags & DLM_IFL_RESEND) {
rv = lkb->lkb_wait_type;
break;
}
}
- up(&ls->ls_waiters_sem);
+ mutex_unlock(&ls->ls_waiters_mutex);
if (!rv)
lkb = NULL;
static inline void lock_rsb(struct dlm_rsb *r)
{
- down(&r->res_sem);
+ mutex_lock(&r->res_mutex);
}
static inline void unlock_rsb(struct dlm_rsb *r)
{
- up(&r->res_sem);
+ mutex_unlock(&r->res_mutex);
}
#endif
#endif
static int ls_count;
-static struct semaphore ls_lock;
+static struct mutex ls_lock;
static struct list_head lslist;
static spinlock_t lslist_lock;
static struct task_struct * scand_task;
int error;
ls_count = 0;
- init_MUTEX(&ls_lock);
+ mutex_init(&ls_lock);
INIT_LIST_HEAD(&lslist);
spin_lock_init(&lslist_lock);
return -EEXIST;
}
- ls = kmalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
+ ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
if (!ls)
goto out;
- memset(ls, 0, sizeof(struct dlm_ls) + namelen);
memcpy(ls->ls_name, name, namelen);
ls->ls_namelen = namelen;
ls->ls_exflags = flags;
}
INIT_LIST_HEAD(&ls->ls_waiters);
- init_MUTEX(&ls->ls_waiters_sem);
+ mutex_init(&ls->ls_waiters_mutex);
INIT_LIST_HEAD(&ls->ls_nodes);
INIT_LIST_HEAD(&ls->ls_nodes_gone);
ls->ls_uevent_result = 0;
ls->ls_recoverd_task = NULL;
- init_MUTEX(&ls->ls_recoverd_active);
+ mutex_init(&ls->ls_recoverd_active);
spin_lock_init(&ls->ls_recover_lock);
ls->ls_recover_status = 0;
ls->ls_recover_seq = 0;
ls->ls_recover_args = NULL;
init_rwsem(&ls->ls_in_recovery);
INIT_LIST_HEAD(&ls->ls_requestqueue);
- init_MUTEX(&ls->ls_requestqueue_lock);
+ mutex_init(&ls->ls_requestqueue_mutex);
ls->ls_recover_buf = kmalloc(dlm_config.buffer_size, GFP_KERNEL);
if (!ls->ls_recover_buf)
{
int error = 0;
- down(&ls_lock);
+ mutex_lock(&ls_lock);
if (!ls_count)
error = threads_start();
if (error)
if (!error)
ls_count++;
out:
- up(&ls_lock);
+ mutex_unlock(&ls_lock);
return error;
}
kobject_unregister(&ls->ls_kobj);
kfree(ls);
- down(&ls_lock);
+ mutex_lock(&ls_lock);
ls_count--;
if (!ls_count)
threads_stop();
- up(&ls_lock);
+ mutex_unlock(&ls_lock);
module_put(THIS_MODULE);
return 0;
struct dlm_member *memb;
int w;
- memb = kmalloc(sizeof(struct dlm_member), GFP_KERNEL);
+ memb = kzalloc(sizeof(struct dlm_member), GFP_KERNEL);
if (!memb)
return -ENOMEM;
list_for_each_entry(memb, &ls->ls_nodes, list) {
if (memb->nodeid == nodeid)
- return TRUE;
+ return 1;
}
- return FALSE;
+ return 0;
}
int dlm_is_removed(struct dlm_ls *ls, int nodeid)
list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
if (memb->nodeid == nodeid)
- return TRUE;
+ return 1;
}
- return FALSE;
+ return 0;
}
static void clear_memb_list(struct list_head *head)
/* move departed members from ls_nodes to ls_nodes_gone */
list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
- found = FALSE;
+ found = 0;
for (i = 0; i < rv->node_count; i++) {
if (memb->nodeid == rv->nodeids[i]) {
- found = TRUE;
+ found = 1;
break;
}
}
int *ids = NULL;
int error, count;
- rv = kmalloc(sizeof(struct dlm_recover), GFP_KERNEL);
+ rv = kzalloc(sizeof(struct dlm_recover), GFP_KERNEL);
if (!rv)
return -ENOMEM;
- memset(rv, 0, sizeof(struct dlm_recover));
error = count = dlm_nodeid_list(ls->ls_name, &ids);
if (error <= 0)
switch (msg->h_cmd) {
case DLM_MSG:
- dlm_receive_message(msg, nodeid, FALSE);
+ dlm_receive_message(msg, nodeid, 0);
break;
case DLM_RCOM:
r = recover_list_find(ls, rc->rc_id);
if (!r) {
- log_error(ls, "dlm_recover_master_reply no id %"PRIx64"",
- rc->rc_id);
+ log_error(ls, "dlm_recover_master_reply no id %llx",
+ (unsigned long long) rc->rc_id);
goto out;
}
if (!list_empty(&r->res_grantqueue) ||
!list_empty(&r->res_convertqueue) ||
!list_empty(&r->res_waitqueue))
- return FALSE;
- return TRUE;
+ return 0;
+ return 1;
}
static int recover_locks(struct dlm_rsb *r)
{
struct dlm_lkb *lkb, *high_lkb = NULL;
uint32_t high_seq = 0;
- int lock_lvb_exists = FALSE;
- int big_lock_exists = FALSE;
+ int lock_lvb_exists = 0;
+ int big_lock_exists = 0;
int lvblen = r->res_ls->ls_lvblen;
list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
continue;
- lock_lvb_exists = TRUE;
+ lock_lvb_exists = 1;
if (lkb->lkb_grmode > DLM_LOCK_CR) {
- big_lock_exists = TRUE;
+ big_lock_exists = 1;
goto setflag;
}
if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
continue;
- lock_lvb_exists = TRUE;
+ lock_lvb_exists = 1;
if (lkb->lkb_grmode > DLM_LOCK_CR) {
- big_lock_exists = TRUE;
+ big_lock_exists = 1;
goto setflag;
}
unsigned long start;
int error, neg = 0;
- log_debug(ls, "recover %"PRIx64"", rv->seq);
+ log_debug(ls, "recover %llx", (unsigned long long) rv->seq);
- down(&ls->ls_recoverd_active);
+ mutex_lock(&ls->ls_recoverd_active);
/*
* Suspending and resuming dlm_astd ensures that no lkb's from this ls
dlm_astd_wake();
- log_debug(ls, "recover %"PRIx64" done: %u ms", rv->seq,
+ log_debug(ls, "recover %llx done: %u ms",
+ (unsigned long long) rv->seq,
jiffies_to_msecs(jiffies - start));
- up(&ls->ls_recoverd_active);
+ mutex_unlock(&ls->ls_recoverd_active);
return 0;
fail:
dlm_release_root_list(ls);
- log_debug(ls, "recover %"PRIx64" error %d", rv->seq, error);
- up(&ls->ls_recoverd_active);
+ log_debug(ls, "recover %llx error %d",
+ (unsigned long long) rv->seq, error);
+ mutex_unlock(&ls->ls_recoverd_active);
return error;
}
void dlm_recoverd_suspend(struct dlm_ls *ls)
{
- down(&ls->ls_recoverd_active);
+ mutex_lock(&ls->ls_recoverd_active);
}
void dlm_recoverd_resume(struct dlm_ls *ls)
{
- up(&ls->ls_recoverd_active);
+ mutex_unlock(&ls->ls_recoverd_active);
}
e->nodeid = nodeid;
memcpy(e->request, hd, length);
- down(&ls->ls_requestqueue_lock);
+ mutex_lock(&ls->ls_requestqueue_mutex);
list_add_tail(&e->list, &ls->ls_requestqueue);
- up(&ls->ls_requestqueue_lock);
+ mutex_unlock(&ls->ls_requestqueue_mutex);
}
int dlm_process_requestqueue(struct dlm_ls *ls)
struct dlm_header *hd;
int error = 0;
- down(&ls->ls_requestqueue_lock);
+ mutex_lock(&ls->ls_requestqueue_mutex);
for (;;) {
if (list_empty(&ls->ls_requestqueue)) {
- up(&ls->ls_requestqueue_lock);
+ mutex_unlock(&ls->ls_requestqueue_mutex);
error = 0;
break;
}
e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
- up(&ls->ls_requestqueue_lock);
+ mutex_unlock(&ls->ls_requestqueue_mutex);
hd = (struct dlm_header *) e->request;
- error = dlm_receive_message(hd, e->nodeid, TRUE);
+ error = dlm_receive_message(hd, e->nodeid, 1);
if (error == -EINTR) {
/* entry is left on requestqueue */
break;
}
- down(&ls->ls_requestqueue_lock);
+ mutex_lock(&ls->ls_requestqueue_mutex);
list_del(&e->list);
kfree(e);
if (dlm_locking_stopped(ls)) {
log_debug(ls, "process_requestqueue abort running");
- up(&ls->ls_requestqueue_lock);
+ mutex_unlock(&ls->ls_requestqueue_mutex);
error = -EINTR;
break;
}
void dlm_wait_requestqueue(struct dlm_ls *ls)
{
for (;;) {
- down(&ls->ls_requestqueue_lock);
+ mutex_lock(&ls->ls_requestqueue_mutex);
if (list_empty(&ls->ls_requestqueue))
break;
if (dlm_locking_stopped(ls))
break;
- up(&ls->ls_requestqueue_lock);
+ mutex_unlock(&ls->ls_requestqueue_mutex);
schedule();
}
- up(&ls->ls_requestqueue_lock);
+ mutex_unlock(&ls->ls_requestqueue_mutex);
}
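Note the locking shape in dlm_wait_requestqueue(): both break paths exit the loop with the mutex still held, so the single mutex_unlock() after the loop pairs with the mutex_lock() of the final iteration. The pattern in isolation (illustrative predicate):

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>

static DEFINE_MUTEX(qlock);
static bool drained;		/* set elsewhere under qlock */

static void wait_drained(void)
{
	for (;;) {
		mutex_lock(&qlock);
		if (drained)
			break;		/* exits with qlock held */
		mutex_unlock(&qlock);
		schedule();		/* yield between polls */
	}
	mutex_unlock(&qlock);		/* pairs with the lock before break */
}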
static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
struct dlm_message *ms;
struct rq_entry *e, *safe;
- down(&ls->ls_requestqueue_lock);
+ mutex_lock(&ls->ls_requestqueue_mutex);
list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
ms = (struct dlm_message *) e->request;
kfree(e);
}
}
- up(&ls->ls_requestqueue_lock);
+ mutex_unlock(&ls->ls_requestqueue_mutex);
}