X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Focfs2%2Fdlm%2Fdlmlock.c;h=671d4ff222cc083c15aa63ed33048c899da2d26b;hb=987b0145d94eecf292d8b301228356f44611ab7c;hp=5ca57ec650c77657c76e82e66f83dea2180da01a;hpb=3944158a6d33f94668dbd6bdc32ff5c67bb53ec2;p=linux-2.6.git

diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 5ca57ec65..671d4ff22 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -53,7 +53,7 @@
 #define MLOG_MASK_PREFIX ML_DLM
 #include "cluster/masklog.h"
 
-static DEFINE_SPINLOCK(dlm_cookie_lock);
+static spinlock_t dlm_cookie_lock = SPIN_LOCK_UNLOCKED;
 static u64 dlm_next_cookie = 1;
 
 static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
@@ -141,23 +141,13 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
 					  res->lockname.len)) {
 			kick_thread = 1;
 			call_ast = 1;
-		} else {
-			mlog(0, "%s: returning DLM_NORMAL to "
-			     "node %u for reco lock\n", dlm->name,
-			     lock->ml.node);
 		}
 	} else {
 		/* for NOQUEUE request, unless we get the
 		 * lock right away, return DLM_NOTQUEUED */
-		if (flags & LKM_NOQUEUE) {
+		if (flags & LKM_NOQUEUE)
 			status = DLM_NOTQUEUED;
-			if (dlm_is_recovery_lock(res->lockname.name,
-						 res->lockname.len)) {
-				mlog(0, "%s: returning NOTQUEUED to "
-				     "node %u for reco lock\n", dlm->name,
-				     lock->ml.node);
-			}
-		} else {
+		else {
 			dlm_lock_get(lock);
 			list_add_tail(&lock->list, &res->blocked);
 			kick_thread = 1;
@@ -201,7 +191,6 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
 				      struct dlm_lock *lock, int flags)
 {
 	enum dlm_status status = DLM_DENIED;
-	int lockres_changed = 1;
 
 	mlog_entry("type=%d\n", lock->ml.type);
 	mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len,
@@ -227,25 +216,8 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
 	lock->lock_pending = 0;
 	if (status != DLM_NORMAL) {
-		if (status == DLM_RECOVERING &&
-		    dlm_is_recovery_lock(res->lockname.name,
-					 res->lockname.len)) {
-			/* recovery lock was mastered by dead node.
-			 * we need to have calc_usage shoot down this
-			 * lockres and completely remaster it. */
-			mlog(0, "%s: recovery lock was owned by "
-			     "dead node %u, remaster it now.\n",
-			     dlm->name, res->owner);
-		} else if (status != DLM_NOTQUEUED) {
-			/*
-			 * DO NOT call calc_usage, as this would unhash
-			 * the remote lockres before we ever get to use
-			 * it. treat as if we never made any change to
-			 * the lockres.
-			 */
-			lockres_changed = 0;
+		if (status != DLM_NOTQUEUED)
 			dlm_error(status);
-		}
 		dlm_revert_pending_lock(res, lock);
 		dlm_lock_put(lock);
 	} else if (dlm_is_recovery_lock(res->lockname.name,
@@ -257,12 +229,12 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
 		mlog(0, "%s: $RECOVERY lock for this node (%u) is "
 		     "mastered by %u; got lock, manually granting (no ast)\n",
 		     dlm->name, dlm->node_num, res->owner);
-		list_move_tail(&lock->list, &res->granted);
+		list_del_init(&lock->list);
+		list_add_tail(&lock->list, &res->granted);
 	}
 	spin_unlock(&res->spinlock);
 
-	if (lockres_changed)
-		dlm_lockres_calc_usage(dlm, res);
+	dlm_lockres_calc_usage(dlm, res);
 
 	wake_up(&res->wq);
 	return status;
@@ -299,14 +271,6 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
 	if (tmpret >= 0) {
 		// successfully sent and received
 		ret = status;  // this is already a dlm_status
-		if (ret == DLM_REJECTED) {
-			mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres "
-			     "no longer owned by %u. that node is coming back "
-			     "up currently.\n", dlm->name, create.namelen,
-			     create.name, res->owner);
-			dlm_print_one_lock_resource(res);
-			BUG();
-		}
 	} else {
 		mlog_errno(tmpret);
 		if (dlm_is_host_down(tmpret)) {
@@ -408,13 +372,13 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
 	struct dlm_lock *lock;
 	int kernel_allocated = 0;
 
-	lock = kcalloc(1, sizeof(*lock), GFP_NOFS);
+	lock = kcalloc(1, sizeof(*lock), GFP_KERNEL);
 	if (!lock)
 		return NULL;
 
 	if (!lksb) {
 		/* zero memory only if kernel-allocated */
-		lksb = kcalloc(1, sizeof(*lksb), GFP_NOFS);
+		lksb = kcalloc(1, sizeof(*lksb), GFP_KERNEL);
 		if (!lksb) {
 			kfree(lock);
 			return NULL;
@@ -455,16 +419,11 @@ int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data)
 	if (!dlm_grab(dlm))
 		return DLM_REJECTED;
 
+	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
+			"Domain %s not fully joined!\n", dlm->name);
+
 	name = create->name;
 	namelen = create->namelen;
-	status = DLM_REJECTED;
-	if (!dlm_domain_fully_joined(dlm)) {
-		mlog(ML_ERROR, "Domain %s not fully joined, but node %u is "
-		     "sending a create_lock message for lock %.*s!\n",
-		     dlm->name, create->node_idx, namelen, name);
-		dlm_error(status);
-		goto leave;
-	}
 
 	status = DLM_IVBUFLEN;
 	if (namelen > DLM_LOCKID_NAME_MAX) {
@@ -700,22 +659,18 @@ retry_lock:
 			msleep(100);
 			/* no waiting for dlm_reco_thread */
 			if (recovery) {
-				if (status != DLM_RECOVERING)
-					goto retry_lock;
-
-				mlog(0, "%s: got RECOVERING "
-				     "for $RECOVERY lock, master "
-				     "was %u\n", dlm->name,
-				     res->owner);
-				/* wait to see the node go down, then
-				 * drop down and allow the lockres to
-				 * get cleaned up. need to remaster. */
-				dlm_wait_for_node_death(dlm, res->owner,
-						DLM_NODE_DEATH_WAIT_MAX);
+				if (status == DLM_RECOVERING) {
+					mlog(0, "%s: got RECOVERING "
+					     "for $RECOVERY lock, master "
+					     "was %u\n", dlm->name,
+					     res->owner);
+					dlm_wait_for_node_death(dlm, res->owner,
+							DLM_NODE_DEATH_WAIT_MAX);
+				}
 			} else {
 				dlm_wait_for_recovery(dlm);
-				goto retry_lock;
 			}
+			goto retry_lock;
 		}
 
 		if (status != DLM_NORMAL) {
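
Note on the grant-queue hunk in dlmlock_remote() above: list_move_tail(&lock->list, &res->granted) on the '-' side and the open-coded list_del_init()/list_add_tail() pair on the '+' side re-queue the lock at the tail of the granted list in the same way, since list_del_init() merely re-points the unlinked entry at itself and the subsequent list_add_tail() overwrites those pointers anyway. Below is a minimal userspace sketch of that equivalence; the list primitives are re-implemented here purely for illustration and only mimic the <linux/list.h> API.

	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

	/* insert 'new' just before 'head', i.e. at the tail of the list */
	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	/* unlink 'entry' and point it back at itself */
	static void list_del_init(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
		INIT_LIST_HEAD(entry);
	}

	/* the '-' side helper: unlink, then re-queue at the tail */
	static void list_move_tail(struct list_head *entry, struct list_head *head)
	{
		list_del_init(entry);
		list_add_tail(entry, head);
	}

	int main(void)
	{
		struct list_head granted, converting, lock;

		INIT_LIST_HEAD(&granted);
		INIT_LIST_HEAD(&converting);
		INIT_LIST_HEAD(&lock);
		list_add_tail(&lock, &converting);	/* lock starts on another queue */

		/* '+' side of the hunk: open-coded move onto the granted queue */
		list_del_init(&lock);
		list_add_tail(&lock, &granted);
		printf("on granted tail: %d, old queue empty: %d\n",
		       granted.prev == &lock, converting.next == &converting);

		/* '-' side of the hunk reaches the same end state */
		list_move_tail(&lock, &granted);
		printf("still on granted tail: %d\n", granted.prev == &lock);
		return 0;
	}

Both printf lines report 1 for every check, which is why the two hunks that touch the granted queue (and the lockres_changed removal around dlm_lockres_calc_usage()) are the only behavior-relevant differences in that function.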