* [Cluster-devel] cluster/group/gfs_controld cpg.c lock_dlm.h ma ...
@ 2006-06-15 15:27 teigland
0 siblings, 0 replies; 4+ messages in thread
From: teigland @ 2006-06-15 15:27 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: teigland at sourceware.org 2006-06-15 15:27:43
Modified files:
group/gfs_controld: cpg.c lock_dlm.h main.c recover.c
Log message:
Significant reworking of how mounts are processed. The previous
approach couldn't deal with certain node failures that occurred while
processing a new mounter. In this new approach, processing a mounter
is largely independent of processing node failures. The case of nodes
failing while a mounter is being processed hasn't actually been tested
yet, so there are sure to be details to fix.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/cpg.c.diff?cvsroot=cluster&r1=1.1&r2=1.2
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&r1=1.1&r2=1.2
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/main.c.diff?cvsroot=cluster&r1=1.1&r2=1.2
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/recover.c.diff?cvsroot=cluster&r1=1.1&r2=1.2
--- cluster/group/gfs_controld/cpg.c 2006/06/09 20:59:57 1.1
+++ cluster/group/gfs_controld/cpg.c 2006/06/15 15:27:43 1.2
@@ -27,7 +27,7 @@
void receive_recovery_status(struct mountgroup *mg, char *buf, int len,
int from);
void receive_recovery_done(struct mountgroup *mg, char *buf, int len, int from);
-
+char *msg_name(int type);
static void do_deliver(int nodeid, char *data, int len)
{
@@ -58,8 +58,8 @@
discard them since they're only relevant to the app group. */
if (!mg->last_callback) {
- log_group(mg, "discard message type %d len %d from %d",
- hd->type, len, nodeid);
+ log_group(mg, "discard %s len %d from %d",
+ msg_name(hd->type), len, nodeid);
return;
}
--- cluster/group/gfs_controld/lock_dlm.h 2006/06/09 20:59:57 1.1
+++ cluster/group/gfs_controld/lock_dlm.h 2006/06/15 15:27:43 1.2
@@ -111,37 +111,37 @@
char dir[PATH_MAX+1];
char options[MAX_OPTIONS_LEN+1];
- char error_msg[128];
- int mount_client;
- int remount_client;
int last_stop;
int last_start;
int last_finish;
int last_callback;
int start_event_nr;
int start_type;
- int needs_recovery;
- int our_jid;
+
+ char error_msg[128];
+ int mount_client;
+ int remount_client;
+ int init;
+ int got_our_options;
+ int got_our_journals;
+ int mount_client_notified;
+ int mount_client_delay;
+ int delay_send_journals;
+ int got_kernel_mount;
int first_mounter;
int first_mounter_done;
int emulate_first_mounter;
int wait_first_done;
- int init;
- int init2;
int low_finished_nodeid;
+
+ int needs_recovery;
+ int our_jid;
int spectator;
int readonly;
int rw;
int withdraw;
- void *journals_msg;
- int journals_msg_len;
- int journals_msg_from;
- void *options_msg;
- int options_msg_len;
- int options_msg_from;
- struct list_head saved_recovery_status;
-
+ struct list_head saved_messages;
void *start2_fn;
};
@@ -174,11 +174,12 @@
struct list_head list;
int nodeid;
int jid;
- int new;
+
int spectator;
int readonly;
int rw;
uint32_t opts;
+
int tell_gfs_to_recover;
int wait_gfs_recover_done;
int gone_event;
@@ -188,6 +189,7 @@
int recovery_status;
int withdraw;
struct dlm_lksb wd_lksb;
+ int needs_journals;
};
enum {
@@ -234,6 +236,7 @@
int do_remount(int ci, char *dir, char *mode);
int do_withdraw(char *name);
int kernel_recovery_done(char *name);
+void ping_kernel_mount(char *table);
int client_send(int ci, char *buf, int len);
--- cluster/group/gfs_controld/main.c 2006/06/09 20:59:57 1.1
+++ cluster/group/gfs_controld/main.c 2006/06/15 15:27:43 1.2
@@ -201,9 +201,10 @@
if (!strcmp(act, "change@"))
kernel_recovery_done(argv[3]);
-
else if (!strcmp(act, "offline@"))
do_withdraw(argv[3]);
+ else
+ ping_kernel_mount(argv[3]);
return 0;
}
--- cluster/group/gfs_controld/recover.c 2006/06/09 20:59:57 1.1
+++ cluster/group/gfs_controld/recover.c 2006/06/15 15:27:43 1.2
@@ -16,10 +16,12 @@
struct list_head list;
int nodeid;
int len;
- char buf[MAX_MSGLEN];
+ int type;
+ char buf[0];
};
#define SYSFS_DIR "/sys/fs"
+#define JID_INIT -9
extern char *clustername;
extern int our_nodeid;
@@ -27,12 +29,12 @@
struct list_head mounts;
+void send_journals(struct mountgroup *mg, int nodeid);
int hold_withdraw_locks(struct mountgroup *mg);
void release_withdraw_lock(struct mountgroup *mg, struct mg_member *memb);
void release_withdraw_locks(struct mountgroup *mg);
void start_participant_init_2(struct mountgroup *mg);
-void start_participant_2(struct mountgroup *mg);
void start_spectator_init_2(struct mountgroup *mg);
void start_spectator_2(struct mountgroup *mg);
@@ -53,6 +55,8 @@
return -1;
}
+ mg->got_kernel_mount = 1;
+
memset(out, 0, 16);
sprintf(out, "%d", val);
rv = write(fd, out, strlen(out));
@@ -81,6 +85,8 @@
return -1;
}
+ mg->got_kernel_mount = 1;
+
rv = read(fd, buf, len);
if (rv < 0)
log_error("read %s error %d %d", fname, rv, errno);
@@ -117,13 +123,6 @@
return NULL;
}
-void clear_new(struct mountgroup *mg)
-{
- struct mg_member *memb;
- list_for_each_entry(memb, &mg->members, list)
- memb->new = 0;
-}
-
static void start_done(struct mountgroup *mg)
{
log_group(mg, "start_done %d", mg->start_event_nr);
@@ -239,37 +238,59 @@
{
struct save_msg *sm, *sm2;
- if (list_empty(&mg->saved_recovery_status))
+ if (list_empty(&mg->saved_messages))
return;
log_group(mg, "process_saved_recovery_status");
- list_for_each_entry_safe(sm, sm2, &mg->saved_recovery_status, list) {
+ list_for_each_entry_safe(sm, sm2, &mg->saved_messages, list) {
+ if (sm->type != MSG_RECOVERY_STATUS)
+ continue;
_receive_recovery_status(mg, sm->buf, sm->len, sm->nodeid);
list_del(&sm->list);
free(sm);
}
}
+char *msg_name(int type)
+{
+ switch (type) {
+ case MSG_JOURNAL:
+ return "MSG_JOURNAL";
+ case MSG_OPTIONS:
+ return "MSG_OPTIONS";
+ case MSG_REMOUNT:
+ return "MSG_REMOUNT";
+ case MSG_PLOCK:
+ return "MSG_PLOCK";
+ case MSG_RECOVERY_STATUS:
+ return "MSG_RECOVERY_STATUS";
+ case MSG_RECOVERY_DONE:
+ return "MSG_RECOVERY_DONE";
+ }
+ return "unknown";
+}
+
/* we can receive recovery_status messages from other nodes doing start before
we actually process the corresponding start callback ourselves */
-void save_recovery_status(struct mountgroup *mg, char *buf, int len, int from)
+void save_message(struct mountgroup *mg, char *buf, int len, int from, int type)
{
struct save_msg *sm;
- sm = malloc(sizeof(struct save_msg));
+ sm = malloc(sizeof(struct save_msg) + len);
if (!sm)
return;
- memset(sm, 0, sizeof(struct save_msg));
+ memset(sm, 0, sizeof(struct save_msg) + len);
memcpy(&sm->buf, buf, len);
+ sm->type = type;
sm->len = len;
sm->nodeid = from;
- list_add_tail(&sm->list, &mg->saved_recovery_status);
+ log_group(mg, "save %s from %d len %d", msg_name(type), from, len);
- log_group(mg, "save_recovery_status from %d len %d", from, len);
+ list_add_tail(&sm->list, &mg->saved_messages);
}
void receive_recovery_status(struct mountgroup *mg, char *buf, int len,
@@ -277,7 +298,7 @@
{
switch (mg->last_callback) {
case DO_STOP:
- save_recovery_status(mg, buf, len, from);
+ save_message(mg, buf, len, from, MSG_RECOVERY_STATUS);
break;
case DO_START:
_receive_recovery_status(mg, buf, len, from);
@@ -460,6 +481,97 @@
free(buf);
}
+/* We set the new member's jid to the lowest unused jid.
+ If we're the lowest existing member (by nodeid), then
+ send jid info to the new node. */
+
+/* Look at rw/ro/spectator status of all existing mounters and whether
+ we need to do recovery. Based on that, decide if the current mount
+ mode (ro/spectator) is permitted; if not, set jid = -2. If spectator
+ mount and it's ok, set jid = -1. If ro or rw mount and it's ok, set
+ real jid. */
+
+int assign_journal(struct mountgroup *mg, struct mg_member *new)
+{
+ struct mg_member *memb;
+ int i, total, rw_count, ro_count, spect_count, invalid_count;
+
+ total = rw_count = ro_count = spect_count = invalid_count = 0;
+
+ list_for_each_entry(memb, &mg->members, list) {
+ if (memb->nodeid == new->nodeid)
+ continue;
+ total++;
+ if (memb->jid == -2)
+ invalid_count++;
+ else if (memb->spectator)
+ spect_count++;
+ else if (memb->rw)
+ rw_count++;
+ else if (memb->readonly)
+ ro_count++;
+ }
+
+ log_group(mg, "assign_journal: total %d iv %d rw %d ro %d spect %d",
+ total, invalid_count, rw_count, ro_count, spect_count);
+
+ /* do we let the new member mount? jid=-2 means no.
+ - we only allow an rw mount when the fs needs recovery
+ - we only allow a single rw mount when the fs needs recovery */
+
+ if (mg->needs_recovery) {
+ if (!new->rw || rw_count)
+ new->jid = -2;
+ }
+
+ if (new->jid == -2) {
+ log_group(mg, "assign_journal: fail - needs_recovery %d",
+ mg->needs_recovery);
+ goto out;
+ }
+
+ if (new->spectator) {
+ log_group(mg, "assign_journal: new spectator allowed");
+ new->jid = -1;
+ goto out;
+ }
+
+ for (i = 0; i < 1024; i++) {
+ memb = find_memb_jid(mg, i);
+ if (!memb) {
+ new->jid = i;
+ break;
+ }
+ }
+
+ /* Currently the fs needs recovery, i.e. none of the current
+ mounters (ro/spectators) can recover journals. So, this new rw
+ mounter is told to do first-mounter recovery of all the journals. */
+
+ if (mg->needs_recovery) {
+ log_group(mg, "assign_journal: new member OPT_RECOVER");
+ new->opts |= MEMB_OPT_RECOVER;
+ }
+
+ out:
+ log_group(mg, "assign_journal: new member %d got jid %d",
+ new->nodeid, new->jid);
+
+ /* if we're the first mounter and haven't gotten others_may_mount
+ yet, then don't send journals until kernel_recovery_done_first
+ so the second node won't mount the fs until omm. */
+
+ if (mg->low_finished_nodeid == our_nodeid) {
+ if (mg->first_mounter && !mg->first_mounter_done) {
+ log_group(mg, "delay sending journals to %d",
+ new->nodeid);
+ mg->delay_send_journals = new->nodeid;
+ } else
+ send_journals(mg, new->nodeid);
+ }
+ return 0;
+}
+
void _receive_options(struct mountgroup *mg, char *buf, int len, int from)
{
struct mg_member *memb;
@@ -475,9 +587,6 @@
return;
}
- if (from == our_nodeid)
- return;
-
if (strstr(options, "spectator")) {
memb->spectator = 1;
memb->opts |= MEMB_OPT_SPECT;
@@ -489,34 +598,58 @@
memb->opts |= MEMB_OPT_RO;
}
- log_group(mg, "receive_options from %d rw=%d ro=%d spect=%d opts=%x",
+ log_group(mg, "_receive_options from %d rw=%d ro=%d spect=%d opts=%x",
from, memb->rw, memb->readonly, memb->spectator, memb->opts);
+
+ assign_journal(mg, memb);
}
void receive_options(struct mountgroup *mg, char *buf, int len, int from)
{
struct gdlm_header *hd = (struct gdlm_header *)buf;
-
- if (hd->nodeid == our_nodeid)
- return;
+ struct mg_member *memb;
log_group(mg, "receive_options from %d len %d last_cb %d",
from, len, mg->last_callback);
- /* If last_callback isn't DO_START it means we've not gotten
- the start callback for the new node addition yet, and we need to
- save this message to be processed after we get our first start. */
-
- if (mg->last_callback != DO_START) {
- mg->options_msg = malloc(len);
- mg->options_msg_len = len;
- mg->options_msg_from = from;
- memcpy(mg->options_msg, buf, len);
- } else {
- void (*start2)(struct mountgroup *mg) = mg->start2_fn;
+ if (hd->nodeid == our_nodeid) {
+ mg->got_our_options = 1;
+ return;
+ }
+
+ if (!mg->got_our_options) {
+ log_group(mg, "ignore options from %d", from);
+ return;
+ }
+
+ /* we can receive an options message before getting the start
+ that adds the mounting node that sent the options, or
+ we can receive options messages before we get the journals
+ message for out own mount */
+
+ memb = find_memb_nodeid(mg, from);
+
+ if (!memb || !mg->got_our_journals)
+ save_message(mg, buf, len, from, MSG_OPTIONS);
+ else
_receive_options(mg, buf, len, from);
- start2(mg);
- mg->start2_fn = NULL;
+}
+
+void process_saved_options(struct mountgroup *mg)
+{
+ struct save_msg *sm, *sm2;
+
+ if (list_empty(&mg->saved_messages))
+ return;
+
+ log_group(mg, "process_saved_options");
+
+ list_for_each_entry_safe(sm, sm2, &mg->saved_messages, list) {
+ if (sm->type != MSG_OPTIONS)
+ continue;
+ _receive_options(mg, sm->buf, sm->len, sm->nodeid);
+ list_del(&sm->list);
+ free(sm);
}
}
@@ -564,6 +697,7 @@
void _receive_journals(struct mountgroup *mg, char *buf, int len, int from)
{
+ void (*start2)(struct mountgroup *mg) = mg->start2_fn;
struct mg_member *memb, *memb2;
struct gdlm_header *hd;
int *ids, count, i, nodeid, jid, opts;
@@ -571,13 +705,6 @@
hd = (struct gdlm_header *)buf;
count = (len - sizeof(struct gdlm_header)) / (NUM * sizeof(int));
-
- if (count != mg->memb_count) {
- log_error("invalid journals message len %d counts %d %d",
- len, count, mg->memb_count);
- return;
- }
-
ids = (int *) (buf + sizeof(struct gdlm_header));
for (i = 0; i < count; i++) {
@@ -615,140 +742,47 @@
memb->spectator = 1;
}
}
+
+ /* we delay processing any options messages from new mounters
+ until after we receive the journals message for our own mount */
+ process_saved_options(mg);
+
+ start2(mg);
}
void receive_journals(struct mountgroup *mg, char *buf, int len, int from)
{
struct gdlm_header *hd = (struct gdlm_header *)buf;
+ struct mg_member *memb;
int count;
- if (hd->to_nodeid && hd->to_nodeid != our_nodeid)
- return;
-
count = (len - sizeof(struct gdlm_header)) / (NUM * sizeof(int));
- log_group(mg, "receive_journals from %d len %d count %d last_cb %d",
- from, len, count, mg->last_callback);
-
- /* If init is still 1 it means we've not run do_start()
- for our join yet, and we need to save this message to be
- processed after we get our first start. */
-
-
- /* it should now be impossible to receive a journals message prior to
- our start because the node sending journals won't do so until
- receiving our options message
- if (mg->init) {
- mg->journals_msg = malloc(len);
- mg->journals_msg_len = len;
- mg->journals_msg_from = from;
- memcpy(mg->journals_msg, buf, len);
- } else {
- *******/
-
- ASSERT(mg->last_callback == DO_START);
-
- {
- void (*start2)(struct mountgroup *mg) = mg->start2_fn;
- _receive_journals(mg, buf, len, from);
- start2(mg);
- mg->start2_fn = NULL;
- }
-}
-
-/* We set the new member's jid to the lowest unused jid.
- If we're the lowest existing member (by nodeid), then
- send jid info to the new node. */
-
-/* Look at rw/ro/spectator status of all existing mounters and whether
- we need to do recovery. Based on that, decide if the current mount
- mode (ro/spectator) is permitted; if not, set jid = -2. If spectator
- mount and it's ok, set jid = -1. If ro or rw mount and it's ok, set
- real jid. */
-
-int discover_journals(struct mountgroup *mg)
-{
- struct mg_member *memb, *new = NULL;
- int i, total, rw_count, ro_count, spect_count, invalid_count;
-
- total = rw_count = ro_count = spect_count = invalid_count = 0;
+ log_group(mg, "receive_journals from %d to %d len %d count %d cb %d",
+ from, hd->to_nodeid, len, count, mg->last_callback);
- list_for_each_entry(memb, &mg->members, list) {
- if (memb->new && new) {
- log_error("more than one new member %d %d",
- new->nodeid, memb->nodeid);
- return -1;
- } else if (memb->new) {
- new = memb;
- } else {
- total++;
- if (memb->jid == -2)
- invalid_count++;
- else if (memb->spectator)
- spect_count++;
- else if (memb->rw)
- rw_count++;
- else if (memb->readonly)
- ro_count++;
- }
- }
-
- if (!new) {
- log_group(mg, "discover_journals: no new member");
- return 0;
- }
+ /* just like we can receive an options msg from a newly added node
+ before we get the start adding it, we can receive the journals
+ message sent to it before we get the start adding it */
- log_group(mg, "discover_journals: total %d iv %d rw %d ro %d spect %d",
- total, invalid_count, rw_count, ro_count, spect_count);
-
- log_group(mg, "discover_journals: new member %d rw=%d ro=%d spect=%d",
- new->nodeid, new->rw, new->readonly, new->spectator);
-
- /* do we let the new member mount? jid=-2 means no.
- - we only allow an rw mount when the fs needs recovery
- - we only allow a single rw mount when the fs needs recovery */
-
- if (mg->needs_recovery) {
- if (!new->rw || rw_count)
- new->jid = -2;
- }
-
- if (new->jid == -2) {
- log_group(mg, "discover_journals: fail - needs_recovery %d",
- mg->needs_recovery);
- goto out;
- }
-
- if (new->spectator) {
- log_group(mg, "discover_journals: new spectator allowed");
- new->jid = -1;
- goto out;
- }
-
- for (i = 0; i < 1024; i++) {
- memb = find_memb_jid(mg, i);
- if (!memb) {
- new->jid = i;
- break;
- }
+ memb = find_memb_nodeid(mg, hd->to_nodeid);
+ if (!memb) {
+ log_group(mg, "receive_journals from %d to unknown %d",
+ from, hd->to_nodeid);
+ return;
}
+ memb->needs_journals = 0;
- /* Currently the fs needs recovery, i.e. none of the current
- mounters (ro/spectators) can recover journals. So, this new rw
- mounter is told to do first-mounter recovery of all the journals. */
+ if (hd->to_nodeid && hd->to_nodeid != our_nodeid)
+ return;
- if (mg->needs_recovery) {
- log_group(mg, "discover_journals: new member OPT_RECOVER");
- new->opts |= MEMB_OPT_RECOVER;
+ if (mg->got_our_journals) {
+ log_group(mg, "receive_journals from %d duplicate", from);
+ return;
}
+ mg->got_our_journals = 1;
- out:
- log_group(mg, "discover_journals: new member %d got jid %d",
- new->nodeid, new->jid);
-
- if (mg->low_finished_nodeid == our_nodeid)
- send_journals(mg, new->nodeid);
- return 0;
+ _receive_journals(mg, buf, len, from);
}
static void add_ordered_member(struct mountgroup *mg, struct mg_member *new)
@@ -786,10 +820,13 @@
memset(memb, 0, sizeof(*memb));
memb->nodeid = nodeid;
- memb->jid = -9;
- memb->new = 1;
+ memb->jid = JID_INIT;
add_ordered_member(mg, memb);
mg->memb_count++;
+
+ if (!mg->init)
+ memb->needs_journals = 1;
+
return 0;
}
@@ -837,6 +874,8 @@
clear_memb_list(&mg->members_gone);
}
+/* This can happen before we receive a journals message for our mount. */
+
void recover_members(struct mountgroup *mg, int num_nodes,
int *nodeids, int *pos_out, int *neg_out)
{
@@ -867,13 +906,13 @@
memb->local_recovery_status = 0;
/* - journal cb for failed or withdrawing nodes
- - journal cb only if failed node finished joining
+ - failed node was assigned a journal
- no journal cb if failed node was spectator
- no journal cb if we've already done a journl cb */
if ((memb->gone_type == GROUP_NODE_FAILED ||
memb->withdraw) &&
- memb->mount_finished &&
+ memb->jid != JID_INIT &&
!memb->spectator &&
!memb->wait_gfs_recover_done) {
memb->tell_gfs_to_recover = 1;
@@ -887,7 +926,7 @@
mg->spectator,
mg->start_type,
memb->withdraw,
- memb->mount_finished,
+ memb->jid,
memb->spectator,
memb->wait_gfs_recover_done);
}
@@ -929,9 +968,8 @@
INIT_LIST_HEAD(&mg->members);
INIT_LIST_HEAD(&mg->members_gone);
INIT_LIST_HEAD(&mg->resources);
- INIT_LIST_HEAD(&mg->saved_recovery_status);
+ INIT_LIST_HEAD(&mg->saved_messages);
mg->init = 1;
- mg->init2 = 1;
strncpy(mg->name, name, MAXNAME);
@@ -1130,10 +1168,11 @@
we delayed calling start_done() (to complete adding
the second node) until here. */
- if (mg->wait_first_done) {
- clear_new(mg);
+ if (mg->wait_first_done)
start_done(mg);
- }
+
+ if (mg->delay_send_journals)
+ send_journals(mg, mg->delay_send_journals);
}
return 0;
}
@@ -1147,13 +1186,15 @@
struct mg_member *memb;
int rv;
- if (mg->spectator || mg->readonly) {
+ if (mg->spectator || mg->readonly || mg->our_jid == JID_INIT) {
list_for_each_entry(memb, &mg->members_gone, list) {
if (!memb->tell_gfs_to_recover)
continue;
- log_group(mg, "recover journal %d nodeid %d skip ro",
- memb->jid, memb->nodeid);
+ log_group(mg, "recover journal %d nodeid %d skip, "
+ "spect %d ro %d our_jid %d",
+ memb->jid, memb->nodeid,
+ mg->spectator, mg->readonly, mg->our_jid);
memb->tell_gfs_to_recover = 0;
memb->local_recovery_status = RS_READONLY;
}
@@ -1395,6 +1436,11 @@
strncpy(buf, mg->error_msg, MAXLINE);
error = 1;
} else {
+ if (mg->mount_client_delay) {
+ log_group(mg, "notify_mount_client delayed");
+ return;
+ }
+
if (mg->our_jid < 0)
snprintf(buf, MAXLINE, "hostdata=id=%u:first=%d",
mg->id, mg->first_mounter);
@@ -1414,7 +1460,24 @@
if (error) {
log_group(mg, "leaving due to mount error: %s", mg->error_msg);
group_leave(gh, mg->name);
- }
+ } else
+ mg->mount_client_notified = 1;
+}
+
+void ping_kernel_mount(char *table)
+{
+ struct mountgroup *mg;
+ char buf[MAXLINE];
+ char *name = strstr(table, ":") + 1;
+ int rv;
+
+ mg = find_mg(name);
+ if (!mg)
+ return;
+
+ rv = get_sysfs(mg, "id", buf, sizeof(buf));
+
+ log_group(mg, "ping_kernel_mount %d", rv);
}
/* When mounting a fs, we first join the mountgroup, then tell mount.gfs
@@ -1438,9 +1501,54 @@
}
}
+/* The processing of new mounters (send/recv options, send/recv journals,
+ notify mount.gfs) is not very integrated with the stop/start/finish
+ callbacks from libgroup. A start callback just notifies us of a new
+ mounter and the options/journals messages drive things from there.
+ Recovery for failed nodes _is_ controlled more directly by the
+ stop/start/finish callbacks. So, processing new mounters happens
+ independently of recovery and of the libgroup callbacks. One place
+ where they need to intersect, though, is in stopping/suspending
+ gfs-kernel:
+ - When we get a stop callback, we need to be certain that gfs-kernel
+ is blocked.
+ - When a mounter notifies mount.gfs to go ahead, gfs-kernel will
+ shortly begin running in an unblocked fashion as it goes through
+ the kernel mounting process.
+ Given this, we need to be sure that if gfs-kernel is supposed to be
+ blocked, we don't notify mount.gfs to go ahead and do the kernel mount
+ since that starts gfs-kernel in an unblocked state. */
+
+/* - if we're unmounting, the kernel is gone, so no problem.
+ - if we've just mounted and notified mount.gfs, then wait for kernel
+ mount and then block.
+ - if we're mounting and have not yet notified mount.gfs, then set
+ a flag that delays the notification until block is set to 0. */
+
int do_stop(struct mountgroup *mg)
{
- set_sysfs(mg, "block", 1);
+ int rv;
+
+ for (;;) {
+ rv = set_sysfs(mg, "block", 1);
+ if (!rv)
+ break;
+
+ /* if the kernel instance of gfs existed before but now
+ we can't see it, that must mean it's been unmounted,
+ so it's implicitly stopped */
+
+ if (mg->got_kernel_mount)
+ break;
+
+ if (mg->mount_client_notified)
+ wait_for_kernel_mount(mg);
+ else {
+ mg->mount_client_delay = 1;
+ break;
+ }
+ }
+
group_stop_done(gh, mg->name);
return 0;
}
@@ -1498,12 +1606,17 @@
leave_blocked = 1;
}
- if (mg->mount_client) {
- notify_mount_client(mg);
- wait_for_kernel_mount(mg);
- } else if (!leave_blocked)
+ if (!leave_blocked) {
set_sysfs(mg, "block", 0);
+ /* we may have been holding back our local mount due to
+ being stopped/blocked */
+ if (mg->mount_client_delay) {
+ mg->mount_client_delay = 0;
+ notify_mount_client(mg);
+ }
+ }
+
return 0;
}
@@ -1544,9 +1657,7 @@
struct mg_member *memb;
log_group(mg, "start_first_mounter");
-
set_our_memb_options(mg);
-
memb = find_memb_nodeid(mg, our_nodeid);
ASSERT(memb);
@@ -1561,11 +1672,12 @@
mg->our_jid = 0;
mg->first_mounter = 1;
mg->first_mounter_done = 0;
+ mg->got_our_options = 1;
+ mg->got_our_journals = 1;
hold_withdraw_locks(mg);
}
- clear_new(mg);
start_done(mg);
- mg->init = 0;
+ notify_mount_client(mg);
}
/* called for the initial start on a rw/ro mounter;
@@ -1574,25 +1686,11 @@
void start_participant_init(struct mountgroup *mg)
{
log_group(mg, "start_participant_init");
-
set_our_memb_options(mg);
send_options(mg);
hold_withdraw_locks(mg);
-
- if (mg->journals_msg) {
- _receive_journals(mg,
- mg->journals_msg,
- mg->journals_msg_len,
- mg->journals_msg_from);
- free(mg->journals_msg);
- mg->journals_msg = NULL;
-
- start_participant_init_2(mg);
- } else {
- /* will be called in receive_journals() */
- mg->start2_fn = start_participant_init_2;
- }
- mg->init = 0;
+ start_done(mg);
+ mg->start2_fn = start_participant_init_2;
}
/* called for the initial start on a rw/ro mounter after _receive_journals() */
@@ -1613,7 +1711,7 @@
/* fs needs recovery and existing mounters can't recover it,
i.e. they're spectator/readonly, so we're told to do
first-mounter recovery on the fs. */
-
+
if (first_mounter_recovery(mg)) {
log_group(mg, "first_mounter_recovery");
mg->emulate_first_mounter = 1;
@@ -1621,12 +1719,13 @@
mg->first_mounter_done = 0;
}
out:
- clear_new(mg);
- start_done(mg);
- mg->init2 = 0;
+ notify_mount_client(mg);
}
-/* called for a non-initial start on a normal mounter */
+/* called for a non-initial start on a normal mounter.
+ NB we can get here without having received a journals message for
+ our (recent) mount yet in which case we don't know the jid or ro/rw
+ status of any members, and don't know our own jid. */
void start_participant(struct mountgroup *mg, int pos, int neg)
{
@@ -1635,80 +1734,40 @@
if (pos) {
hold_withdraw_locks(mg);
- if (mg->options_msg) {
- _receive_options(mg,
- mg->options_msg,
- mg->options_msg_len,
- mg->options_msg_from);
- free(mg->options_msg);
- mg->options_msg = NULL;
+ /* If we're the first mounter, and we're adding a second
+ node here, but haven't gotten first_done (others_may_mount)
+ from gfs yet, then don't do the start_done() to complete
+ adding the second node. Set wait_first_done=1 to have
+ first_recovery_done() call start_done(). This also requires
+ that we unblock locking on the first mounter if gfs hasn't
+ done others_may_mount yet. */
+
+ if (mg->first_mounter && !mg->first_mounter_done) {
+ mg->wait_first_done = 1;
+ set_sysfs(mg, "block", 0);
+ log_group(mg, "delay start_done til others_may_mount");
+ } else
+ start_done(mg);
+
+ mg->start2_fn = NULL;
+ process_saved_options(mg);
- start_participant_2(mg);
- } else {
- /* will be called in receive_options() */
- mg->start2_fn = start_participant_2;
- }
} else if (neg) {
recover_journals(mg);
process_saved_recovery_status(mg);
}
}
-/* called for a non-initial start on a normal mounter when adding a node,
- after _receive_options(). we need to know if the new node is a spectator
- or not (from options) before deciding if it should be given a journal
- in discover_journals() */
-
-void start_participant_2(struct mountgroup *mg)
-{
- log_group(mg, "start_participant_2");
-
- discover_journals(mg);
-
- /* If we're the first mounter, and we're adding a second
- node here, but haven't gotten first_done (others_may_mount) from gfs
- yet, then don't do the start_done() to complete adding the
- second node. Set wait_first_done=1 to have first_recovery_done()
- call start_done().
- This also requires that we unblock locking on the first
- mounter if gfs hasn't done others_may_mount yet. */
-
- if (mg->init2 && mg->first_mounter && !mg->first_mounter_done) {
- mg->wait_first_done = 1;
- set_sysfs(mg, "block", 0);
- log_group(mg, "delay start_done until others_may_mount");
- } else {
- clear_new(mg);
- start_done(mg);
- }
-
- mg->init2 = 0;
-}
-
/* called for the initial start on a spectator mounter */
void start_spectator_init(struct mountgroup *mg)
{
log_group(mg, "start_spectator_init");
-
set_our_memb_options(mg);
send_options(mg);
hold_withdraw_locks(mg);
-
- if (mg->journals_msg) {
- _receive_journals(mg,
- mg->journals_msg,
- mg->journals_msg_len,
- mg->journals_msg_from);
- free(mg->journals_msg);
- mg->journals_msg = NULL;
-
- start_spectator_init_2(mg);
- } else {
- /* will be called in receive_journals() */
- mg->start2_fn = start_spectator_init_2;
- }
- mg->init = 0;
+ start_done(mg);
+ mg->start2_fn = start_spectator_init_2;
}
/* called for the initial start on a spectator mounter,
@@ -1726,9 +1785,7 @@
else
ASSERT(mg->our_jid == -1);
- clear_new(mg);
- start_done(mg);
- mg->init2 = 0;
+ notify_mount_client(mg);
}
/* called for a non-initial start on a spectator mounter */
@@ -1739,37 +1796,14 @@
if (pos) {
hold_withdraw_locks(mg);
-
- if (mg->options_msg) {
- _receive_options(mg,
- mg->options_msg,
- mg->options_msg_len,
- mg->options_msg_from);
- free(mg->options_msg);
- mg->options_msg = NULL;
-
- start_spectator_2(mg);
- } else {
- /* will be called in receive_options() */
- mg->start2_fn = start_spectator_2;
- }
+ start_done(mg);
+ process_saved_options(mg);
} else if (neg) {
recover_journals(mg);
process_saved_recovery_status(mg);
}
}
-/* called for a non-initial start on a spectator mounter when adding a
- node, after _receive_options() */
-
-void start_spectator_2(struct mountgroup *mg)
-{
- log_group(mg, "start_spectator_2");
- discover_journals(mg);
- clear_new(mg);
- start_done(mg);
-}
-
/* If nodeA fails, nodeB is recovering journalA and nodeB fails before
finishing, then nodeC needs to tell gfs to recover both journalA and
journalB. We do this by setting tell_gfs_to_recover back to 1 for
@@ -1792,7 +1826,25 @@
}
}
+/* New mounters may be waiting for a journals message that a failed node (as
+ low nodeid) would have sent. If the low nodeid failed and we're the new low
+ nodeid, then send a journals message to any nodes for whom we've not seen a
+ journals message. */
+
+void resend_journals(struct mountgroup *mg)
+{
+ struct mg_member *memb;
+
+ list_for_each_entry(memb, &mg->members, list) {
+ if (!memb->needs_journals)
+ continue;
+ log_group(mg, "resend_journals to %d", memb->nodeid);
+ send_journals(mg, memb->nodeid);
+ }
+}
+
/*
+ old method:
A is rw mount, B mounts rw
do_start do_start
@@ -1808,11 +1860,27 @@
start_participant_init_2
group_start_done
do_finish do_finish
+
+ new method: decouples stop/start/finish from mount processing
+ A is rw mount, B mounts rw
+
+ do_start do_start
+ start_participant start_participant_init
+ start_done send_options
+ start_done
+ do_finish do_finish
+
+ receive_options
+ assign_journal
+ send_journals
+ receive_journals
+ start_participant_init_2
+ notify_mount_client
*/
void do_start(struct mountgroup *mg, int type, int member_count, int *nodeids)
{
- int pos = 0, neg = 0;
+ int pos = 0, neg = 0, low;
mg->start_event_nr = mg->last_start;
mg->start_type = type;
@@ -1820,10 +1888,18 @@
log_group(mg, "start %d init %d type %d member_count %d",
mg->last_start, mg->init, type, member_count);
+ low = mg->low_finished_nodeid;
+
recover_members(mg, member_count, nodeids, &pos, &neg);
reset_unfinished_recoveries(mg);
+ if (neg && low != mg->low_finished_nodeid && low == our_nodeid) {
+ log_group(mg, "low nodeid failed old %d new %d",
+ low, mg->low_finished_nodeid);
+ resend_journals(mg);
+ }
+
if (mg->init) {
if (member_count == 1)
start_first_mounter(mg);
@@ -1831,6 +1907,7 @@
start_spectator_init(mg);
else
start_participant_init(mg);
+ mg->init = 0;
} else {
if (mg->spectator)
start_spectator(mg, pos, neg);
@@ -1839,8 +1916,7 @@
}
}
-/* FIXME:
-
+/*
What repurcussions are there from umount shutting down gfs in the
kernel before we leave the mountgroup? We can no longer participate
in recovery even though we're in the group -- what are the end cases
^ permalink raw reply [flat|nested] 4+ messages in thread
* [Cluster-devel] cluster/group/gfs_controld cpg.c lock_dlm.h ma ...
@ 2006-08-09 19:35 teigland
0 siblings, 0 replies; 4+ messages in thread
From: teigland @ 2006-08-09 19:35 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: teigland at sourceware.org 2006-08-09 19:35:26
Modified files:
group/gfs_controld: cpg.c lock_dlm.h main.c plock.c
Log message:
don't send plock debugging to stdout with -D, use -P to get that now
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/cpg.c.diff?cvsroot=cluster&r1=1.5&r2=1.6
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&r1=1.12&r2=1.13
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/main.c.diff?cvsroot=cluster&r1=1.7&r2=1.8
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.11&r2=1.12
--- cluster/group/gfs_controld/cpg.c 2006/07/31 18:37:07 1.5
+++ cluster/group/gfs_controld/cpg.c 2006/08/09 19:35:26 1.6
@@ -39,8 +39,10 @@
mg = find_mg(hd->name);
if (!mg) {
+ /*
log_error("cpg message from %d len %d no group %s",
nodeid, len, hd->name);
+ */
return;
}
--- cluster/group/gfs_controld/lock_dlm.h 2006/08/08 21:19:17 1.12
+++ cluster/group/gfs_controld/lock_dlm.h 2006/08/09 19:35:26 1.13
@@ -65,6 +65,7 @@
};
extern char *prog_name;
+extern int plock_debug_opt;
extern int daemon_debug_opt;
extern char daemon_debug_buf[256];
extern char dump_buf[DUMP_SIZE];
@@ -88,6 +89,13 @@
daemon_dump_save(); \
} while (0)
+#define log_plock(g, fmt, args...) \
+do { \
+ snprintf(daemon_debug_buf, 255, "%ld %s " fmt "\n", time(NULL), \
+ (g)->name, ##args); \
+ if (plock_debug_opt) fprintf(stderr, "%s", daemon_debug_buf); \
+} while (0)
+
#define log_error(fmt, args...) \
do { \
log_debug(fmt, ##args); \
--- cluster/group/gfs_controld/main.c 2006/07/31 18:37:07 1.7
+++ cluster/group/gfs_controld/main.c 2006/08/09 19:35:26 1.8
@@ -12,7 +12,7 @@
#include "lock_dlm.h"
-#define OPTION_STRING "DhVw"
+#define OPTION_STRING "DPhVw"
#define LOCKFILE_NAME "/var/run/gfs_controld.pid"
struct client {
@@ -473,6 +473,10 @@
daemon_debug_opt = 1;
break;
+ case 'P':
+ plock_debug_opt = 1;
+ break;
+
case 'h':
print_usage();
exit(EXIT_SUCCESS);
@@ -534,6 +538,7 @@
}
char *prog_name;
+int plock_debug_opt;
int daemon_debug_opt;
char daemon_debug_buf[256];
char dump_buf[DUMP_SIZE];
--- cluster/group/gfs_controld/plock.c 2006/08/08 21:19:17 1.11
+++ cluster/group/gfs_controld/plock.c 2006/08/09 19:35:26 1.12
@@ -308,7 +308,7 @@
goto fail;
}
- log_group(mg, "read plock %llx %s %s %llx-%llx %d/%u/%llx w %d",
+ log_plock(mg, "read plock %llx %s %s %llx-%llx %d/%u/%llx w %d",
info.number,
op_str(info.optype),
ex_str(info.optype, info.ex),
@@ -867,7 +867,7 @@
info_bswap_in(&info);
- log_group(mg, "receive plock %llx %s %s %llx-%llx %d/%u/%llx w %d",
+ log_plock(mg, "receive plock %llx %s %s %llx-%llx %d/%u/%llx w %d",
info.number,
op_str(info.optype),
ex_str(info.optype, info.ex),
@@ -985,7 +985,7 @@
section_len = count * sizeof(struct pack_plock);
- log_group(mg, "pack %llx count %d", r->number, count);
+ log_plock(mg, "pack %llx count %d", r->number, count);
}
int unpack_section_buf(struct mountgroup *mg, char *numbuf, int buflen)
@@ -1005,7 +1005,7 @@
INIT_LIST_HEAD(&r->waiters);
sscanf(numbuf, "r%llu", &r->number);
- log_group(mg, "unpack %llx count %d", r->number, count);
+ log_plock(mg, "unpack %llx count %d", r->number, count);
pp = (struct pack_plock *) &section_buf;
^ permalink raw reply [flat|nested] 4+ messages in thread
* [Cluster-devel] cluster/group/gfs_controld cpg.c lock_dlm.h ma ...
@ 2007-11-28 20:49 teigland
0 siblings, 0 replies; 4+ messages in thread
From: teigland @ 2007-11-28 20:49 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: teigland at sourceware.org 2007-11-28 20:49:08
Modified files:
group/gfs_controld: cpg.c lock_dlm.h main.c plock.c recover.c
Log message:
A performance optimization for plocks. This speeds up locks that are
repeatedly accessed by processes on a single node. Plocks used by
processes on multiple nodes work the same way as before. The
optimization is disabled by default, and can be enabled by setting
<gfs_controld plock_ownership="1"/>
in cluster.conf, or by starting gfs_controld with "-o1". It is disabled
by default because enabling it breaks compatibility with previous versions
of gfs_controld. If all nodes in the cluster are running this version,
then plock_ownership can be enabled.
The plock_ownership mode needs extensive testing. This also introduces
some minor changes when plock_ownership is disabled, so new testing is
also required in that mode. Abhi and I worked on this together.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/cpg.c.diff?cvsroot=cluster&r1=1.13&r2=1.14
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&r1=1.30&r2=1.31
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/main.c.diff?cvsroot=cluster&r1=1.31&r2=1.32
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.33&r2=1.34
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/recover.c.diff?cvsroot=cluster&r1=1.33&r2=1.34
--- cluster/group/gfs_controld/cpg.c 2007/07/18 20:35:50 1.13
+++ cluster/group/gfs_controld/cpg.c 2007/11/28 20:49:08 1.14
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2006 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -13,6 +13,8 @@
#include <openais/cpg.h>
#include "lock_dlm.h"
+extern struct list_head mounts;
+extern unsigned int protocol_active[3];
static cpg_handle_t daemon_handle;
static struct cpg_name daemon_name;
int message_flow_control_on;
@@ -21,6 +23,9 @@
void receive_options(struct mountgroup *mg, char *buf, int len, int from);
void receive_remount(struct mountgroup *mg, char *buf, int len, int from);
void receive_plock(struct mountgroup *mg, char *buf, int len, int from);
+void receive_own(struct mountgroup *mg, char *buf, int len, int from);
+void receive_drop(struct mountgroup *mg, char *buf, int len, int from);
+void receive_sync(struct mountgroup *mg, char *buf, int len, int from);
void receive_withdraw(struct mountgroup *mg, char *buf, int len, int from);
void receive_mount_status(struct mountgroup *mg, char *buf, int len, int from);
void receive_recovery_status(struct mountgroup *mg, char *buf, int len,
@@ -51,9 +56,14 @@
hd->nodeid = le32_to_cpu(hd->nodeid);
hd->to_nodeid = le32_to_cpu(hd->to_nodeid);
- if (hd->version[0] != GDLM_VER_MAJOR) {
- log_error("reject message version %u.%u.%u",
- hd->version[0], hd->version[1], hd->version[2]);
+ /* FIXME: we need to look at how to gracefully fail when we end up
+ with mixed incompat versions */
+
+ if (hd->version[0] != protocol_active[0]) {
+ log_error("reject message from %d version %u.%u.%u vs %u.%u.%u",
+ nodeid, hd->version[0], hd->version[1],
+ hd->version[2], protocol_active[0],
+ protocol_active[1], protocol_active[2]);
return;
}
@@ -100,6 +110,19 @@
receive_withdraw(mg, data, len, nodeid);
break;
+ case MSG_PLOCK_OWN:
+ receive_own(mg, data, len, nodeid);
+ break;
+
+ case MSG_PLOCK_DROP:
+ receive_drop(mg, data, len, nodeid);
+ break;
+
+ case MSG_PLOCK_SYNC_LOCK:
+ case MSG_PLOCK_SYNC_WAITER:
+ receive_sync(mg, data, len, nodeid);
+ break;
+
default:
log_error("unknown message type %d from %d",
hd->type, hd->nodeid);
@@ -112,11 +135,26 @@
do_deliver(nodeid, data, data_len);
}
+/* Not sure if purging plocks (driven by confchg) needs to be synchronized with
+ the other recovery steps (driven by libgroup) for a node, don't think so.
+ Is it possible for a node to have been cleared from the members_gone list
+ before this confchg is processed? */
+
void confchg_cb(cpg_handle_t handle, struct cpg_name *group_name,
struct cpg_address *member_list, int member_list_entries,
struct cpg_address *left_list, int left_list_entries,
struct cpg_address *joined_list, int joined_list_entries)
{
+ struct mountgroup *mg;
+ int i, nodeid;
+
+ for (i = 0; i < left_list_entries; i++) {
+ nodeid = left_list[i].nodeid;
+ list_for_each_entry(mg, &mounts, list) {
+ if (is_member(mg, nodeid) || is_removed(mg, nodeid))
+ purge_plocks(mg, left_list[i].nodeid, 0);
+ }
+ }
}
static cpg_callbacks_t callbacks = {
@@ -238,9 +276,9 @@
struct gdlm_header *hd = (struct gdlm_header *) buf;
int type = hd->type;
- hd->version[0] = cpu_to_le16(GDLM_VER_MAJOR);
- hd->version[1] = cpu_to_le16(GDLM_VER_MINOR);
- hd->version[2] = cpu_to_le16(GDLM_VER_PATCH);
+ hd->version[0] = cpu_to_le16(protocol_active[0]);
+ hd->version[1] = cpu_to_le16(protocol_active[1]);
+ hd->version[2] = cpu_to_le16(protocol_active[2]);
hd->type = cpu_to_le16(hd->type);
hd->nodeid = cpu_to_le32(hd->nodeid);
hd->to_nodeid = cpu_to_le32(hd->to_nodeid);
--- cluster/group/gfs_controld/lock_dlm.h 2007/11/21 17:49:16 1.30
+++ cluster/group/gfs_controld/lock_dlm.h 2007/11/28 20:49:08 1.31
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -168,6 +168,7 @@
uint64_t cp_handle;
time_t last_checkpoint_time;
time_t last_plock_time;
+ struct timeval drop_resources_last;
int needs_recovery;
int our_jid;
@@ -241,12 +242,12 @@
MSG_MOUNT_STATUS,
MSG_RECOVERY_STATUS,
MSG_RECOVERY_DONE,
+ MSG_PLOCK_OWN,
+ MSG_PLOCK_DROP,
+ MSG_PLOCK_SYNC_LOCK,
+ MSG_PLOCK_SYNC_WAITER,
};
-#define GDLM_VER_MAJOR 1
-#define GDLM_VER_MINOR 0
-#define GDLM_VER_PATCH 0
-
struct gdlm_header {
uint16_t version[3];
uint16_t type; /* MSG_ */
@@ -267,6 +268,9 @@
int do_write(int fd, void *buf, size_t count);
struct mountgroup *find_mg(char *name);
struct mountgroup *find_mg_id(uint32_t id);
+struct mg_member *find_memb_nodeid(struct mountgroup *mg, int nodeid);
+int is_member(struct mountgroup *mg, int nodeid);
+int is_removed(struct mountgroup *mg, int nodeid);
int setup_cman(void);
int process_cman(void);
--- cluster/group/gfs_controld/main.c 2007/10/26 19:33:21 1.31
+++ cluster/group/gfs_controld/main.c 2007/11/28 20:49:08 1.32
@@ -11,12 +11,29 @@
******************************************************************************/
#include "lock_dlm.h"
+#include "ccs.h"
-#define OPTION_STRING "DPhVwpl:"
+#define OPTION_STRING "DPhVwpl:o:t:c:a:"
#define LOCKFILE_NAME "/var/run/gfs_controld.pid"
+#define DEFAULT_NO_WITHDRAW 0 /* enable withdraw by default */
+#define DEFAULT_NO_PLOCK 0 /* enable plocks by default */
+
+/* max number of plock ops we will cpg-multicast per second */
#define DEFAULT_PLOCK_RATE_LIMIT 100
+/* disable ownership by default because it's a different protocol */
+#define DEFAULT_PLOCK_OWNERSHIP 0
+
+/* max frequency of drop attempts in ms */
+#define DEFAULT_DROP_RESOURCES_TIME 10000 /* 10 sec */
+
+/* max number of resources to drop per time period */
+#define DEFAULT_DROP_RESOURCES_COUNT 10
+
+/* resource not accessed for this many ms before subject to dropping */
+#define DEFAULT_DROP_RESOURCES_AGE 10000 /* 10 sec */
+
struct client {
int fd;
char type[32];
@@ -24,11 +41,41 @@
int another_mount;
};
+extern struct list_head mounts;
+extern struct list_head withdrawn_mounts;
+extern group_handle_t gh;
+
+int dmsetup_wait;
+
+/* cpg message protocol
+ 1.0.0 is initial version
+ 2.0.0 is incompatible with 1.0.0 and allows plock ownership */
+unsigned int protocol_v100[3] = {1, 0, 0};
+unsigned int protocol_v200[3] = {2, 0, 0};
+unsigned int protocol_active[3];
+
+/* user configurable */
+int config_no_withdraw;
+int config_no_plock;
+uint32_t config_plock_rate_limit;
+uint32_t config_plock_ownership;
+uint32_t config_drop_resources_time;
+uint32_t config_drop_resources_count;
+uint32_t config_drop_resources_age;
+
+/* command line settings override corresponding cluster.conf settings */
+static int opt_no_withdraw;
+static int opt_no_plock;
+static int opt_plock_rate_limit;
+static int opt_plock_ownership;
+static int opt_drop_resources_time;
+static int opt_drop_resources_count;
+static int opt_drop_resources_age;
+
static int client_maxi;
static int client_size = 0;
static struct client *client = NULL;
static struct pollfd *pollfd = NULL;
-
static int cman_fd;
static int cpg_fd;
static int listen_fd;
@@ -37,13 +84,6 @@
static int plocks_fd;
static int plocks_ci;
-extern struct list_head mounts;
-extern struct list_head withdrawn_mounts;
-extern group_handle_t gh;
-int no_withdraw;
-int no_plock;
-uint32_t plock_rate_limit = DEFAULT_PLOCK_RATE_LIMIT;
-int dmsetup_wait;
int do_read(int fd, void *buf, size_t count)
{
@@ -620,6 +660,85 @@
return rv;
}
+#define PLOCK_RATE_LIMIT_PATH "/cluster/gfs_controld/@plock_rate_limit"
+#define PLOCK_OWNERSHIP_PATH "/cluster/gfs_controld/@plock_ownership"
+#define DROP_RESOURCES_TIME_PATH "/cluster/gfs_controld/@drop_resources_time"
+#define DROP_RESOURCES_COUNT_PATH "/cluster/gfs_controld/@drop_resources_count"
+#define DROP_RESOURCES_AGE_PATH "/cluster/gfs_controld/@drop_resources_age"
+
+static void set_ccs_config(void)
+{
+ char path[PATH_MAX], *str;
+ int i = 0, cd, error;
+
+ while ((cd = ccs_connect()) < 0) {
+ sleep(1);
+ if (++i > 9 && !(i % 10))
+ log_error("connect to ccs error %d, "
+ "check ccsd or cluster status", cd);
+ }
+
+ memset(path, 0, PATH_MAX);
+ snprintf(path, PATH_MAX, "%s", PLOCK_RATE_LIMIT_PATH);
+ str = NULL;
+
+ error = ccs_get(cd, path, &str);
+ if (!error) {
+ if (!opt_plock_rate_limit)
+ config_plock_rate_limit = atoi(str);
+ }
+ if (str)
+ free(str);
+
+ memset(path, 0, PATH_MAX);
+ snprintf(path, PATH_MAX, "%s", PLOCK_OWNERSHIP_PATH);
+ str = NULL;
+
+ error = ccs_get(cd, path, &str);
+ if (!error) {
+ if (!opt_plock_ownership)
+ config_plock_ownership = atoi(str);
+ }
+ if (str)
+ free(str);
+
+ memset(path, 0, PATH_MAX);
+ snprintf(path, PATH_MAX, "%s", DROP_RESOURCES_TIME_PATH);
+ str = NULL;
+
+ error = ccs_get(cd, path, &str);
+ if (!error) {
+ if (!opt_drop_resources_time)
+ config_drop_resources_time = atoi(str);
+ }
+ if (str)
+ free(str);
+
+ memset(path, 0, PATH_MAX);
+ snprintf(path, PATH_MAX, "%s", DROP_RESOURCES_COUNT_PATH);
+ str = NULL;
+
+ error = ccs_get(cd, path, &str);
+ if (!error) {
+ if (!opt_drop_resources_count)
+ config_drop_resources_count = atoi(str);
+ }
+ if (str)
+ free(str);
+
+ memset(path, 0, PATH_MAX);
+ snprintf(path, PATH_MAX, "%s", DROP_RESOURCES_AGE_PATH);
+ str = NULL;
+
+ error = ccs_get(cd, path, &str);
+ if (!error) {
+ if (!opt_drop_resources_age)
+ config_drop_resources_age = atoi(str);
+ }
+ if (str)
+ free(str);
+}
+
static void lockfile(void)
{
int fd, error;
@@ -692,10 +811,18 @@
printf("\n");
printf(" -D Enable debugging code and don't fork\n");
printf(" -P Enable plock debugging\n");
+ printf(" -w Disable withdraw\n");
printf(" -p Disable plocks\n");
printf(" -l <limit> Limit the rate of plock operations\n");
printf(" Default is %d, set to 0 for no limit\n", DEFAULT_PLOCK_RATE_LIMIT);
- printf(" -w Disable withdraw\n");
+ printf(" -o <n> plock ownership, 1 enable, 0 disable\n");
+ printf(" Default is %d\n", DEFAULT_PLOCK_OWNERSHIP);
+ printf(" -t <ms> drop resources time (milliseconds)\n");
+ printf(" Default is %u\n", DEFAULT_DROP_RESOURCES_TIME);
+ printf(" -c <num> drop resources count\n");
+ printf(" Default is %u\n", DEFAULT_DROP_RESOURCES_COUNT);
+ printf(" -a <ms> drop resources age (milliseconds)\n");
+ printf(" Default is %u\n", DEFAULT_DROP_RESOURCES_AGE);
printf(" -h Print this help, then exit\n");
printf(" -V Print program version information, then exit\n");
}
@@ -710,10 +837,6 @@
switch (optchar) {
- case 'w':
- no_withdraw = 1;
- break;
-
case 'D':
daemon_debug_opt = 1;
break;
@@ -722,12 +845,39 @@
plock_debug_opt = 1;
break;
- case 'l':
- plock_rate_limit = atoi(optarg);
+ case 'w':
+ config_no_withdraw = 1;
+ opt_no_withdraw = 1;
break;
case 'p':
- no_plock = 1;
+ config_no_plock = 1;
+ opt_no_plock = 1;
+ break;
+
+ case 'l':
+ config_plock_rate_limit = atoi(optarg);
+ opt_plock_rate_limit = 1;
+ break;
+
+ case 'o':
+ config_plock_ownership = atoi(optarg);
+ opt_plock_ownership = 1;
+ break;
+
+ case 't':
+ config_drop_resources_time = atoi(optarg);
+ opt_drop_resources_time = 1;
+ break;
+
+ case 'c':
+ config_drop_resources_count = atoi(optarg);
+ opt_drop_resources_count = 1;
+ break;
+
+ case 'a':
+ config_drop_resources_age = atoi(optarg);
+ opt_drop_resources_age = 1;
break;
case 'h':
@@ -792,14 +942,41 @@
int main(int argc, char **argv)
{
prog_name = argv[0];
+
INIT_LIST_HEAD(&mounts);
INIT_LIST_HEAD(&withdrawn_mounts);
+ config_no_withdraw = DEFAULT_NO_WITHDRAW;
+ config_no_plock = DEFAULT_NO_PLOCK;
+ config_plock_rate_limit = DEFAULT_PLOCK_RATE_LIMIT;
+ config_plock_ownership = DEFAULT_PLOCK_OWNERSHIP;
+ config_drop_resources_time = DEFAULT_DROP_RESOURCES_TIME;
+ config_drop_resources_count = DEFAULT_DROP_RESOURCES_COUNT;
+ config_drop_resources_age = DEFAULT_DROP_RESOURCES_AGE;
+
decode_arguments(argc, argv);
if (!daemon_debug_opt)
daemonize();
+ /* ccs settings override the defaults, but not the command line */
+ set_ccs_config();
+
+ if (config_plock_ownership)
+ memcpy(protocol_active, protocol_v200, sizeof(protocol_v200));
+ else
+ memcpy(protocol_active, protocol_v100, sizeof(protocol_v100));
+
+ log_debug("config_no_withdraw %d", config_no_withdraw);
+ log_debug("config_no_plock %d", config_no_plock);
+ log_debug("config_plock_rate_limit %u", config_plock_rate_limit);
+ log_debug("config_plock_ownership %u", config_plock_ownership);
+ log_debug("config_drop_resources_time %u", config_drop_resources_time);
+ log_debug("config_drop_resources_count %u", config_drop_resources_count);
+ log_debug("config_drop_resources_age %u", config_drop_resources_age);
+ log_debug("protocol %u.%u.%u", protocol_active[0], protocol_active[1],
+ protocol_active[2]);
+
set_scheduler();
set_oom_adj(-16);
--- cluster/group/gfs_controld/plock.c 2007/07/18 20:35:50 1.33
+++ cluster/group/gfs_controld/plock.c 2007/11/28 20:49:08 1.34
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -44,20 +44,26 @@
#define CONTROL_DIR "/dev/misc"
#define CONTROL_NAME "lock_dlm_plock"
-static int control_fd = -1;
extern int our_nodeid;
-static int plocks_online = 0;
extern int message_flow_control_on;
-extern int no_plock;
-extern uint32_t plock_rate_limit;
-uint32_t plock_read_count;
-uint32_t plock_recv_count;
-uint32_t plock_rate_delays;
-struct timeval plock_read_time;
-struct timeval plock_recv_time;
-struct timeval plock_rate_last;
+/* user configurable */
+extern int config_no_plock;
+extern uint32_t config_plock_rate_limit;
+extern uint32_t config_plock_ownership;
+extern uint32_t config_drop_resources_time;
+extern uint32_t config_drop_resources_count;
+extern uint32_t config_drop_resources_age;
+
+static int plocks_online = 0;
+static uint32_t plock_read_count;
+static uint32_t plock_recv_count;
+static uint32_t plock_rate_delays;
+static struct timeval plock_read_time;
+static struct timeval plock_recv_time;
+static struct timeval plock_rate_last;
+static int control_fd = -1;
static SaCkptHandleT ckpt_handle;
static SaCkptCallbacksT callbacks = { 0, 0 };
static SaVersionT version = { 'B', 1, 1 };
@@ -76,13 +82,22 @@
uint32_t pad;
};
+#define R_GOT_UNOWN 0x00000001 /* have received owner=0 message */
+
struct resource {
struct list_head list; /* list of resources */
uint64_t number;
- struct list_head locks; /* one lock for each range */
+ int owner; /* nodeid or 0 for unowned */
+ uint32_t flags;
+ struct timeval last_access;
+ struct list_head locks; /* one lock for each range */
struct list_head waiters;
+ struct list_head pending; /* discovering r owner */
};
+#define P_SYNCING 0x00000001 /* plock has been sent as part of sync but not
+ yet received */
+
struct posix_lock {
struct list_head list; /* resource locks or waiters list */
uint32_t pid;
@@ -91,13 +106,26 @@
uint64_t end;
int ex;
int nodeid;
+ uint32_t flags;
};
struct lock_waiter {
struct list_head list;
+ uint32_t flags;
struct gdlm_plock_info info;
};
+
+static void send_own(struct mountgroup *mg, struct resource *r, int owner);
+static void save_pending_plock(struct mountgroup *mg, struct resource *r,
+ struct gdlm_plock_info *in);
+
+
+static int got_unown(struct resource *r)
+{
+ return !!(r->flags & R_GOT_UNOWN);
+}
+
static void info_bswap_out(struct gdlm_plock_info *i)
{
i->version[0] = cpu_to_le32(i->version[0]);
@@ -292,7 +320,7 @@
gettimeofday(&plock_recv_time, NULL);
gettimeofday(&plock_rate_last, NULL);
- if (no_plock)
+ if (config_no_plock)
goto control;
err = saCkptInitialize(&ckpt_handle, &callbacks, &version);
@@ -333,113 +361,6 @@
return dt;
}
-int process_plocks(void)
-{
- struct mountgroup *mg;
- struct gdlm_plock_info info;
- struct gdlm_header *hd;
- struct timeval now;
- char *buf;
- uint64_t usec;
- int len, rv;
-
- /* Don't send more messages while the cpg message queue is backed up */
-
- if (message_flow_control_on) {
- update_flow_control_status();
- if (message_flow_control_on)
- return -EBUSY;
- }
-
- /* Every N ops we check how long it's taken to do those N ops.
- If it's less than 1000 ms, we don't take any more. */
-
- if (plock_rate_limit && plock_read_count &&
- !(plock_read_count % plock_rate_limit)) {
- gettimeofday(&now, NULL);
- if (time_diff_ms(&plock_rate_last, &now) < 1000) {
- plock_rate_delays++;
- return -EBUSY;
- }
- plock_rate_last = now;
- }
-
- memset(&info, 0, sizeof(info));
-
- rv = do_read(control_fd, &info, sizeof(info));
- if (rv < 0) {
- log_debug("process_plocks: read error %d fd %d\n",
- errno, control_fd);
- return 0;
- }
-
- if (!plocks_online) {
- rv = -ENOSYS;
- goto fail;
- }
-
- mg = find_mg_id(info.fsid);
- if (!mg) {
- log_debug("process_plocks: no mg id %x", info.fsid);
- rv = -EEXIST;
- goto fail;
- }
-
- log_plock(mg, "read plock %llx %s %s %llx-%llx %d/%u/%llx w %d",
- (unsigned long long)info.number,
- op_str(info.optype),
- ex_str(info.optype, info.ex),
- (unsigned long long)info.start, (unsigned long long)info.end,
- info.nodeid, info.pid, (unsigned long long)info.owner,
- info.wait);
-
- /* report plock rate and any delays since the last report */
- plock_read_count++;
- if (!(plock_read_count % 1000)) {
- gettimeofday(&now, NULL);
- usec = dt_usec(&plock_read_time, &now) ;
- log_group(mg, "plock_read_count %u time %.3f s delays %u",
- plock_read_count, usec * 1.e-6, plock_rate_delays);
- plock_read_time = now;
- plock_rate_delays = 0;
- }
-
- len = sizeof(struct gdlm_header) + sizeof(struct gdlm_plock_info);
- buf = malloc(len);
- if (!buf) {
- rv = -ENOMEM;
- goto fail;
- }
- memset(buf, 0, len);
-
- info.nodeid = our_nodeid;
-
- hd = (struct gdlm_header *)buf;
- hd->type = MSG_PLOCK;
- hd->nodeid = our_nodeid;
- hd->to_nodeid = 0;
- memcpy(buf + sizeof(struct gdlm_header), &info, sizeof(info));
-
- info_bswap_out((struct gdlm_plock_info *) buf +
- sizeof(struct gdlm_header));
-
- rv = send_group_message(mg, len, buf);
-
- free(buf);
-
- if (rv) {
- log_error("send plock error %d", rv);
- goto fail;
- }
- return 0;
-
- fail:
- info.rv = rv;
- rv = write(control_fd, &info, sizeof(info));
-
- return 0;
-}
-
static struct resource *search_resource(struct mountgroup *mg, uint64_t number)
{
struct resource *r;
@@ -468,6 +389,7 @@
r = malloc(sizeof(struct resource));
if (!r) {
+ log_error("find_resource no memory %d", errno);
rv = -ENOMEM;
goto out;
}
@@ -476,15 +398,27 @@
r->number = number;
INIT_LIST_HEAD(&r->locks);
INIT_LIST_HEAD(&r->waiters);
+ INIT_LIST_HEAD(&r->pending);
+
+ if (config_plock_ownership)
+ r->owner = -1;
+ else
+ r->owner = 0;
list_add_tail(&r->list, &mg->resources);
out:
+ if (r)
+ gettimeofday(&r->last_access, NULL);
*r_out = r;
return rv;
}
static void put_resource(struct resource *r)
{
+ /* with ownership, resources are only freed via drop messages */
+ if (config_plock_ownership)
+ return;
+
if (list_empty(&r->locks) && list_empty(&r->waiters)) {
list_del(&r->list);
free(r);
@@ -825,6 +759,7 @@
{
struct lock_waiter *w;
+
w = malloc(sizeof(struct lock_waiter));
if (!w)
return -ENOMEM;
@@ -873,15 +808,11 @@
}
}
-static void do_lock(struct mountgroup *mg, struct gdlm_plock_info *in)
+static void do_lock(struct mountgroup *mg, struct gdlm_plock_info *in,
+ struct resource *r)
{
- struct resource *r = NULL;
int rv;
- rv = find_resource(mg, in->number, 1, &r);
- if (rv)
- goto out;
-
if (is_conflict(r, in, 0)) {
if (!in->wait)
rv = -EAGAIN;
@@ -902,41 +833,57 @@
put_resource(r);
}
-static void do_unlock(struct mountgroup *mg, struct gdlm_plock_info *in)
+static void do_unlock(struct mountgroup *mg, struct gdlm_plock_info *in,
+ struct resource *r)
{
- struct resource *r = NULL;
int rv;
- rv = find_resource(mg, in->number, 0, &r);
- if (!rv)
- rv = unlock_internal(mg, r, in);
+ rv = unlock_internal(mg, r, in);
if (in->nodeid == our_nodeid)
write_result(mg, in, rv);
- if (r) {
- do_waiters(mg, r);
- put_resource(r);
- }
+ do_waiters(mg, r);
+ put_resource(r);
}
-static void do_get(struct mountgroup *mg, struct gdlm_plock_info *in)
+/* we don't even get to this function if the getlk isn't from us */
+
+static void do_get(struct mountgroup *mg, struct gdlm_plock_info *in,
+ struct resource *r)
{
- struct resource *r = NULL;
int rv;
- rv = find_resource(mg, in->number, 0, &r);
- if (rv)
- goto out;
-
if (is_conflict(r, in, 1))
rv = 1;
else
rv = 0;
- out:
+
write_result(mg, in, rv);
}
+static void __receive_plock(struct mountgroup *mg, struct gdlm_plock_info *in,
+ int from, struct resource *r)
+{
+ switch (in->optype) {
+ case GDLM_PLOCK_OP_LOCK:
+ mg->last_plock_time = time(NULL);
+ do_lock(mg, in, r);
+ break;
+ case GDLM_PLOCK_OP_UNLOCK:
+ mg->last_plock_time = time(NULL);
+ do_unlock(mg, in, r);
+ break;
+ case GDLM_PLOCK_OP_GET:
+ do_get(mg, in, r);
+ break;
+ default:
+ log_error("receive_plock from %d optype %d", from, in->optype);
+ if (from == our_nodeid)
+ write_result(mg, in, -EINVAL);
+ }
+}
+
/* When mg members receive our options message (for our mount), one of them
saves all plock state received to that point in a checkpoint and then sends
us our journals message. We know to retrieve the plock state from the
@@ -947,16 +894,16 @@
set save_plocks (when we see our options message) can be ignored because it
should be reflected in the checkpointed state. */
-void _receive_plock(struct mountgroup *mg, char *buf, int len, int from)
+static void _receive_plock(struct mountgroup *mg, char *buf, int len, int from)
{
struct gdlm_plock_info info;
struct gdlm_header *hd = (struct gdlm_header *) buf;
+ struct resource *r = NULL;
struct timeval now;
uint64_t usec;
- int rv = 0;
+ int rv, create;
memcpy(&info, buf + sizeof(struct gdlm_header), sizeof(info));
-
info_bswap_in(&info);
log_plock(mg, "receive plock %llx %s %s %llx-%llx %d/%u/%llx w %d",
@@ -982,30 +929,80 @@
if (from != hd->nodeid || from != info.nodeid) {
log_error("receive_plock from %d header %d info %d",
from, hd->nodeid, info.nodeid);
- rv = -EINVAL;
- goto out;
+ return;
}
- switch (info.optype) {
- case GDLM_PLOCK_OP_LOCK:
- mg->last_plock_time = time(NULL);
- do_lock(mg, &info);
- break;
- case GDLM_PLOCK_OP_UNLOCK:
- mg->last_plock_time = time(NULL);
- do_unlock(mg, &info);
- break;
- case GDLM_PLOCK_OP_GET:
- do_get(mg, &info);
- break;
- default:
- log_error("receive_plock from %d optype %d", from, info.optype);
- rv = -EINVAL;
+ create = !config_plock_ownership;
+
+ rv = find_resource(mg, info.number, create, &r);
+
+ if (rv && config_plock_ownership) {
+ /* There must have been a race with a drop, so we need to
+ ignore this plock op which will be resent. If we're the one
+ who sent the plock, we need to send_own() and put it on the
+ pending list to resend once the owner is established. */
+
+ log_debug("receive_plock from %d no r %llx", from,
+ (unsigned long long)info.number);
+
+ if (from != our_nodeid)
+ return;
+
+ rv = find_resource(mg, info.number, 1, &r);
+ if (rv)
+ return;
+ send_own(mg, r, our_nodeid);
+ save_pending_plock(mg, r, &info);
+ return;
}
+ if (rv) {
+ /* r not found, rv is -ENOENT, this shouldn't happen because
+ process_plocks() creates a resource for every op */
- out:
- if (from == our_nodeid && rv)
- write_result(mg, &info, rv);
+ log_error("receive_plock from %d no r %llx %d", from,
+ (unsigned long long)info.number, rv);
+ return;
+ }
+
+ /* The owner should almost always be 0 here, but other owners may
+ be possible given odd combinations of races with drop. Odd races to
+ worry about (some seem pretty improbable):
+
+ - A sends drop, B sends plock, receive drop, receive plock.
+ This is addressed above.
+
+ - A sends drop, B sends two plocks, receive drop, receive plocks.
+ Receiving the first plock is the previous case, receiving the
+ second plock will find r with owner of -1.
+
+ - A sends drop, B sends two plocks, receive drop, C sends own,
+ receive plock, B sends own, receive own (C), receive plock,
+ receive own (B).
+
+ Haven't tried to cook up a scenario that would lead to the
+ last case below; receiving a plock from ourself and finding
+ we're the owner of r. */
+
+ /* may want to suppress this if some of them are common enough */
+ if (r->owner)
+ log_error("receive_plock from %d r %llx owner %d", from,
+ (unsigned long long)info.number, r->owner);
+
+ if (!r->owner) {
+ __receive_plock(mg, &info, from, r);
+
+ } else if (r->owner == -1) {
+ if (from == our_nodeid)
+ save_pending_plock(mg, r, &info);
+
+ } else if (r->owner != our_nodeid) {
+ if (from == our_nodeid)
+ save_pending_plock(mg, r, &info);
+
+ } else if (r->owner == our_nodeid) {
+ if (from == our_nodeid)
+ __receive_plock(mg, &info, from, r);
+ }
}
void receive_plock(struct mountgroup *mg, char *buf, int len, int from)
@@ -1023,92 +1020,696 @@
_receive_plock(mg, buf, len, from);
}
-void process_saved_plocks(struct mountgroup *mg)
+static int send_struct_info(struct mountgroup *mg, struct gdlm_plock_info *in,
+ int msg_type)
{
- struct save_msg *sm, *sm2;
+ char *buf;
+ int rv, len;
+ struct gdlm_header *hd;
- if (list_empty(&mg->saved_messages))
- return;
+ len = sizeof(struct gdlm_header) + sizeof(struct gdlm_plock_info);
+ buf = malloc(len);
+ if (!buf) {
+ rv = -ENOMEM;
+ goto out;
+ }
+ memset(buf, 0, len);
- log_group(mg, "process_saved_plocks");
+ hd = (struct gdlm_header *)buf;
+ hd->type = msg_type;
+ hd->nodeid = our_nodeid;
+ hd->to_nodeid = 0;
- list_for_each_entry_safe(sm, sm2, &mg->saved_messages, list) {
- if (sm->type != MSG_PLOCK)
- continue;
- _receive_plock(mg, sm->buf, sm->len, sm->nodeid);
- list_del(&sm->list);
- free(sm);
- }
+ memcpy(buf + sizeof(struct gdlm_header), in, sizeof(*in));
+ info_bswap_out((struct gdlm_plock_info *) buf + sizeof(*hd));
+
+ rv = send_group_message(mg, len, buf);
+
+ free(buf);
+ out:
+ if (rv)
+ log_error("send plock message error %d", rv);
+ return rv;
}
-void plock_exit(void)
+static void send_plock(struct mountgroup *mg, struct resource *r,
+ struct gdlm_plock_info *in)
{
- if (plocks_online)
- saCkptFinalize(ckpt_handle);
+ send_struct_info(mg, in, MSG_PLOCK);
}
-void pack_section_buf(struct mountgroup *mg, struct resource *r)
+static void send_own(struct mountgroup *mg, struct resource *r, int owner)
{
- struct pack_plock *pp;
- struct posix_lock *po;
- struct lock_waiter *w;
- int count = 0;
-
- memset(&section_buf, 0, sizeof(section_buf));
+ struct gdlm_plock_info info;
- pp = (struct pack_plock *) &section_buf;
+ /* if we've already sent an own message for this resource,
+ (pending list is not empty), then we shouldn't send another */
- list_for_each_entry(po, &r->locks, list) {
- pp->start = cpu_to_le64(po->start);
- pp->end = cpu_to_le64(po->end);
- pp->owner = cpu_to_le64(po->owner);
- pp->pid = cpu_to_le32(po->pid);
- pp->nodeid = cpu_to_le32(po->nodeid);
- pp->ex = po->ex;
- pp->waiter = 0;
- pp++;
- count++;
+ if (!list_empty(&r->pending)) {
+ log_debug("send_own %llx already pending",
+ (unsigned long long)r->number);
+ return;
}
- list_for_each_entry(w, &r->waiters, list) {
- pp->start = cpu_to_le64(w->info.start);
- pp->end = cpu_to_le64(w->info.end);
- pp->owner = cpu_to_le64(w->info.owner);
- pp->pid = cpu_to_le32(w->info.pid);
- pp->nodeid = cpu_to_le32(w->info.nodeid);
- pp->ex = w->info.ex;
- pp->waiter = 1;
- pp++;
- count++;
- }
+ memset(&info, 0, sizeof(info));
+ info.number = r->number;
+ info.nodeid = owner;
- section_len = count * sizeof(struct pack_plock);
+ send_struct_info(mg, &info, MSG_PLOCK_OWN);
}
-int unpack_section_buf(struct mountgroup *mg, char *numbuf, int buflen)
+static void send_syncs(struct mountgroup *mg, struct resource *r)
{
- struct pack_plock *pp;
+ struct gdlm_plock_info info;
struct posix_lock *po;
struct lock_waiter *w;
- struct resource *r;
- int count = section_len / sizeof(struct pack_plock);
- int i;
- unsigned long long num;
+ int rv;
- r = malloc(sizeof(struct resource));
- if (!r)
- return -ENOMEM;
- memset(r, 0, sizeof(struct resource));
- INIT_LIST_HEAD(&r->locks);
- INIT_LIST_HEAD(&r->waiters);
- sscanf(numbuf, "r%llu", &num);
- r->number = num;
+ list_for_each_entry(po, &r->locks, list) {
+ memset(&info, 0, sizeof(info));
+ info.number = r->number;
+ info.start = po->start;
+ info.end = po->end;
+ info.nodeid = po->nodeid;
+ info.owner = po->owner;
+ info.pid = po->pid;
+ info.ex = po->ex;
- pp = (struct pack_plock *) &section_buf;
+ rv = send_struct_info(mg, &info, MSG_PLOCK_SYNC_LOCK);
+ if (rv)
+ goto out;
- for (i = 0; i < count; i++) {
- if (!pp->waiter) {
- po = malloc(sizeof(struct posix_lock));
+ po->flags |= P_SYNCING;
+ }
+
+ list_for_each_entry(w, &r->waiters, list) {
+ memcpy(&info, &w->info, sizeof(info));
+
+ rv = send_struct_info(mg, &info, MSG_PLOCK_SYNC_WAITER);
+ if (rv)
+ goto out;
+
+ w->flags |= P_SYNCING;
+ }
+ out:
+ return;
+}
+
+static void send_drop(struct mountgroup *mg, struct resource *r)
+{
+ struct gdlm_plock_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.number = r->number;
+
+ send_struct_info(mg, &info, MSG_PLOCK_DROP);
+}
+
+/* plock op can't be handled until we know the owner value of the resource,
+ so the op is saved on the pending list until the r owner is established */
+
+static void save_pending_plock(struct mountgroup *mg, struct resource *r,
+ struct gdlm_plock_info *in)
+{
+ struct lock_waiter *w;
+
+ w = malloc(sizeof(struct lock_waiter));
+ if (!w) {
+ log_error("save_pending_plock no mem");
+ return;
+ }
+ memcpy(&w->info, in, sizeof(struct gdlm_plock_info));
+ list_add_tail(&w->list, &r->pending);
+}
+
+/* plock ops are on pending list waiting for ownership to be established.
+ owner has now become us, so add these plocks to r */
+
+static void add_pending_plocks(struct mountgroup *mg, struct resource *r)
+{
+ struct lock_waiter *w, *safe;
+
+ list_for_each_entry_safe(w, safe, &r->pending, list) {
+ __receive_plock(mg, &w->info, our_nodeid, r);
+ list_del(&w->list);
+ free(w);
+ }
+}
+
+/* plock ops are on pending list waiting for ownership to be established.
+ owner has now become 0, so send these plocks to everyone */
+
+static void send_pending_plocks(struct mountgroup *mg, struct resource *r)
+{
+ struct lock_waiter *w, *safe;
+
+ list_for_each_entry_safe(w, safe, &r->pending, list) {
+ send_plock(mg, r, &w->info);
+ list_del(&w->list);
+ free(w);
+ }
+}
+
+static void _receive_own(struct mountgroup *mg, char *buf, int len, int from)
+{
+ struct gdlm_header *hd = (struct gdlm_header *) buf;
+ struct gdlm_plock_info info;
+ struct resource *r;
+ int should_not_happen = 0;
+ int rv;
+
+ memcpy(&info, buf + sizeof(struct gdlm_header), sizeof(info));
+ info_bswap_in(&info);
+
+ log_plock(mg, "receive own %llx from %u owner %u",
+ (unsigned long long)info.number, hd->nodeid, info.nodeid);
+
+ rv = find_resource(mg, info.number, 1, &r);
+ if (rv)
+ return;
+
+ if (from == our_nodeid) {
+ /*
+ * received our own own message
+ */
+
+ if (info.nodeid == 0) {
+ /* we are setting owner to 0 */
+
+ if (r->owner == our_nodeid) {
+ /* we set owner to 0 when we relinquish
+ ownership */
+ should_not_happen = 1;
+ } else if (r->owner == 0) {
+ /* this happens when we relinquish ownership */
+ r->flags |= R_GOT_UNOWN;
+ } else {
+ should_not_happen = 1;
+ }
+
+ } else if (info.nodeid == our_nodeid) {
+ /* we are setting owner to ourself */
+
+ if (r->owner == -1) {
+ /* we have gained ownership */
+ r->owner = our_nodeid;
+ add_pending_plocks(mg, r);
+ } else if (r->owner == our_nodeid) {
+ should_not_happen = 1;
+ } else if (r->owner == 0) {
+ send_pending_plocks(mg, r);
+ } else {
+ /* resource is owned by other node;
+ they should set owner to 0 shortly */
+ }
+
+ } else {
+ /* we should only ever set owner to 0 or ourself */
+ should_not_happen = 1;
+ }
+ } else {
+ /*
+ * received own message from another node
+ */
+
+ if (info.nodeid == 0) {
+ /* other node is setting owner to 0 */
+
+ if (r->owner == -1) {
+ /* we should have a record of the owner before
+ it relinquishes */
+ should_not_happen = 1;
+ } else if (r->owner == our_nodeid) {
+ /* only the owner should relinquish */
+ should_not_happen = 1;
+ } else if (r->owner == 0) {
+ should_not_happen = 1;
+ } else {
+ r->owner = 0;
+ r->flags |= R_GOT_UNOWN;
+ send_pending_plocks(mg, r);
+ }
+
+ } else if (info.nodeid == from) {
+ /* other node is setting owner to itself */
+
+ if (r->owner == -1) {
+ /* normal path for a node becoming owner */
+ r->owner = from;
+ } else if (r->owner == our_nodeid) {
+ /* we relinquish our ownership: sync our local
+ plocks to everyone, then set owner to 0 */
+ send_syncs(mg, r);
+ send_own(mg, r, 0);
+ /* we need to set owner to 0 here because
+ local ops may arrive before we receive
+ our send_own message and can't be added
+ locally */
+ r->owner = 0;
+ } else if (r->owner == 0) {
+ /* can happen because we set owner to 0 before
+ we receive our send_own sent just above */
+ } else {
+ /* do nothing, current owner should be
+ relinquishing its ownership */
+ }
+
+ } else if (info.nodeid == our_nodeid) {
+ /* no one else should try to set the owner to us */
+ should_not_happen = 1;
+ } else {
+ /* a node should only ever set owner to 0 or itself */
+ should_not_happen = 1;
+ }
+ }
+
+ if (should_not_happen) {
+ log_error("receive_own from %u %llx info nodeid %d r owner %d",
+ from, (unsigned long long)r->number, info.nodeid,
+ r->owner);
+ }
+}
+
+void receive_own(struct mountgroup *mg, char *buf, int len, int from)
+{
+ if (mg->save_plocks) {
+ save_message(mg, buf, len, from, MSG_PLOCK_OWN);
+ return;
+ }
+
+ _receive_own(mg, buf, len, from);
+}
+
+static void clear_syncing_flag(struct resource *r, struct gdlm_plock_info *in)
+{
+ struct posix_lock *po;
+ struct lock_waiter *w;
+
+ list_for_each_entry(po, &r->locks, list) {
+ if ((po->flags & P_SYNCING) &&
+ in->start == po->start &&
+ in->end == po->end &&
+ in->nodeid == po->nodeid &&
+ in->owner == po->owner &&
+ in->pid == po->pid &&
+ in->ex == po->ex) {
+ po->flags &= ~P_SYNCING;
+ return;
+ }
+ }
+
+ list_for_each_entry(w, &r->waiters, list) {
+ if ((w->flags & P_SYNCING) &&
+ in->start == w->info.start &&
+ in->end == w->info.end &&
+ in->nodeid == w->info.nodeid &&
+ in->owner == w->info.owner &&
+ in->pid == w->info.pid &&
+ in->ex == w->info.ex) {
+ w->flags &= ~P_SYNCING;
+ return;
+ }
+ }
+
+ log_error("clear_syncing %llx no match %s %llx-%llx %d/%u/%llx",
+ (unsigned long long)r->number, in->ex ? "WR" : "RD",
+ (unsigned long long)in->start, (unsigned long long)in->end,
+ in->nodeid, in->pid, (unsigned long long)in->owner);
+}
+
+static void _receive_sync(struct mountgroup *mg, char *buf, int len, int from)
+{
+ struct gdlm_plock_info info;
+ struct gdlm_header *hd = (struct gdlm_header *) buf;
+ struct resource *r;
+ int rv;
+
+ memcpy(&info, buf + sizeof(struct gdlm_header), sizeof(info));
+ info_bswap_in(&info);
+
+ log_plock(mg, "receive sync %llx from %u %s %llx-%llx %d/%u/%llx",
+ (unsigned long long)info.number, from, info.ex ? "WR" : "RD",
+ (unsigned long long)info.start, (unsigned long long)info.end,
+ info.nodeid, info.pid, (unsigned long long)info.owner);
+
+ rv = find_resource(mg, info.number, 0, &r);
+ if (rv) {
+ log_error("receive_sync no r %llx from %d", info.number, from);
+ return;
+ }
+
+ if (from == our_nodeid) {
+ /* this plock now in sync on all nodes */
+ clear_syncing_flag(r, &info);
+ return;
+ }
+
+ if (hd->type == MSG_PLOCK_SYNC_LOCK)
+ add_lock(r, info.nodeid, info.owner, info.pid, !info.ex,
+ info.start, info.end);
+ else if (hd->type == MSG_PLOCK_SYNC_WAITER)
+ add_waiter(mg, r, &info);
+}
+
+void receive_sync(struct mountgroup *mg, char *buf, int len, int from)
+{
+ struct gdlm_header *hd = (struct gdlm_header *) buf;
+
+ if (mg->save_plocks) {
+ save_message(mg, buf, len, from, hd->type);
+ return;
+ }
+
+ _receive_sync(mg, buf, len, from);
+}
+
+static void _receive_drop(struct mountgroup *mg, char *buf, int len, int from)
+{
+ struct gdlm_plock_info info;
+ struct resource *r;
+ int rv;
+
+ memcpy(&info, buf + sizeof(struct gdlm_header), sizeof(info));
+ info_bswap_in(&info);
+
+ log_plock(mg, "receive drop %llx from %u",
+ (unsigned long long)info.number, from);
+
+ rv = find_resource(mg, info.number, 0, &r);
+ if (rv) {
+ /* we'll find no r if two nodes sent drop at once */
+ log_debug("receive_drop from %d no r %llx", from,
+ (unsigned long long)info.number);
+ return;
+ }
+
+ if (r->owner != 0) {
+ /* shouldn't happen */
+ log_error("receive_drop from %d r %llx owner %d", from,
+ (unsigned long long)r->number, r->owner);
+ return;
+ }
+
+ if (!list_empty(&r->pending)) {
+ /* shouldn't happen */
+ log_error("receive_drop from %d r %llx pending op", from,
+ (unsigned long long)r->number);
+ return;
+ }
+
+ /* the decision to drop or not must be based on things that are
+ guaranteed to be the same on all nodes */
+
+ if (list_empty(&r->locks) && list_empty(&r->waiters)) {
+ list_del(&r->list);
+ free(r);
+ } else {
+ /* A sent drop, B sent a plock, receive plock, receive drop */
+ log_debug("receive_drop from %d r %llx in use", from,
+ (unsigned long long)r->number);
+ }
+}
+
+void receive_drop(struct mountgroup *mg, char *buf, int len, int from)
+{
+ if (mg->save_plocks) {
+ save_message(mg, buf, len, from, MSG_PLOCK_DROP);
+ return;
+ }
+
+ _receive_drop(mg, buf, len, from);
+}
+
+/* We only drop resources from the unowned state to simplify things.
+ If we want to drop a resource we own, we unown/relinquish it first. */
+
+/* FIXME: in the transition from owner = us, to owner = 0, to drop;
+ we want the second period to be shorter than the first */
+
+static int drop_resources(struct mountgroup *mg)
+{
+ struct resource *r;
+ struct timeval now;
+ int count = 0;
+
+ gettimeofday(&now, NULL);
+
+ /* try to drop the oldest, unused resources */
+
+ list_for_each_entry_reverse(r, &mg->resources, list) {
+ if (count >= config_drop_resources_count)
+ break;
+ if (r->owner && r->owner != our_nodeid)
+ continue;
+ if (time_diff_ms(&r->last_access, &now) <
+ config_drop_resources_age)
+ continue;
+
+ if (list_empty(&r->locks) && list_empty(&r->waiters)) {
+ if (r->owner == our_nodeid) {
+ send_own(mg, r, 0);
+ r->owner = 0;
+ } else if (r->owner == 0 && got_unown(r)) {
+ send_drop(mg, r);
+ }
+
+ count++;
+ }
+ }
+
+ return 0;
+}
+
+int process_plocks(void)
+{
+ struct mountgroup *mg;
+ struct resource *r;
+ struct gdlm_plock_info info;
+ struct timeval now;
+ uint64_t usec;
+ int rv;
+
+ /* Don't send more messages while the cpg message queue is backed up */
+
+ if (message_flow_control_on) {
+ update_flow_control_status();
+ if (message_flow_control_on)
+ return -EBUSY;
+ }
+
+ gettimeofday(&now, NULL);
+
+ /* Every N ops we check how long it's taken to do those N ops.
+ If it's less than 1000 ms, we don't take any more. */
+
+ if (config_plock_rate_limit && plock_read_count &&
+ !(plock_read_count % config_plock_rate_limit)) {
+ if (time_diff_ms(&plock_rate_last, &now) < 1000) {
+ plock_rate_delays++;
+ return -EBUSY;
+ }
+ plock_rate_last = now;
+ }
+
+ memset(&info, 0, sizeof(info));
+
+ rv = do_read(control_fd, &info, sizeof(info));
+ if (rv < 0) {
+ log_debug("process_plocks: read error %d fd %d\n",
+ errno, control_fd);
+ return 0;
+ }
+
+ /* kernel doesn't set the nodeid field */
+ info.nodeid = our_nodeid;
+
+ if (!plocks_online) {
+ rv = -ENOSYS;
+ goto fail;
+ }
+
+ mg = find_mg_id(info.fsid);
+ if (!mg) {
+ log_debug("process_plocks: no mg id %x", info.fsid);
+ rv = -EEXIST;
+ goto fail;
+ }
+
+ log_plock(mg, "read plock %llx %s %s %llx-%llx %d/%u/%llx w %d",
+ (unsigned long long)info.number,
+ op_str(info.optype),
+ ex_str(info.optype, info.ex),
+ (unsigned long long)info.start, (unsigned long long)info.end,
+ info.nodeid, info.pid, (unsigned long long)info.owner,
+ info.wait);
+
+ /* report plock rate and any delays since the last report */
+ plock_read_count++;
+ if (!(plock_read_count % 1000)) {
+ usec = dt_usec(&plock_read_time, &now) ;
+ log_group(mg, "plock_read_count %u time %.3f s delays %u",
+ plock_read_count, usec * 1.e-6, plock_rate_delays);
+ plock_read_time = now;
+ plock_rate_delays = 0;
+ }
+
+ rv = find_resource(mg, info.number, 1, &r);
+ if (rv)
+ goto fail;
+
+ if (r->owner == 0) {
+ /* plock state replicated on all nodes */
+ send_plock(mg, r, &info);
+
+ } else if (r->owner == our_nodeid) {
+ /* we are the owner of r, so our plocks are local */
+ __receive_plock(mg, &info, our_nodeid, r);
+
+ } else {
+ /* r owner is -1: r is new, try to become the owner;
+ r owner > 0: tell other owner to give up ownership;
+ both done with a message trying to set owner to ourself */
+ send_own(mg, r, our_nodeid);
+ save_pending_plock(mg, r, &info);
+ }
+
+ if (config_plock_ownership &&
+ time_diff_ms(&mg->drop_resources_last, &now) >=
+ config_drop_resources_time) {
+ mg->drop_resources_last = now;
+ drop_resources(mg);
+ }
+
+ return 0;
+
+ fail:
+ info.rv = rv;
+ rv = write(control_fd, &info, sizeof(info));
+
+ return 0;
+}
+
+void process_saved_plocks(struct mountgroup *mg)
+{
+ struct save_msg *sm, *sm2;
+
+ if (list_empty(&mg->saved_messages))
+ return;
+
+ log_group(mg, "process_saved_plocks");
+
+ list_for_each_entry_safe(sm, sm2, &mg->saved_messages, list) {
+ switch (sm->type) {
+ case MSG_PLOCK:
+ _receive_plock(mg, sm->buf, sm->len, sm->nodeid);
+ break;
+ case MSG_PLOCK_OWN:
+ _receive_own(mg, sm->buf, sm->len, sm->nodeid);
+ break;
+ case MSG_PLOCK_DROP:
+ _receive_drop(mg, sm->buf, sm->len, sm->nodeid);
+ break;
+ case MSG_PLOCK_SYNC_LOCK:
+ case MSG_PLOCK_SYNC_WAITER:
+ _receive_sync(mg, sm->buf, sm->len, sm->nodeid);
+ break;
+ default:
+ continue;
+ }
+
+ list_del(&sm->list);
+ free(sm);
+ }
+}
+
+void plock_exit(void)
+{
+ if (plocks_online)
+ saCkptFinalize(ckpt_handle);
+}
+
+/* locks still marked SYNCING should not go into the ckpt; the new node
+ will get those locks by receiving PLOCK_SYNC messages */
+
+static void pack_section_buf(struct mountgroup *mg, struct resource *r)
+{
+ struct pack_plock *pp;
+ struct posix_lock *po;
+ struct lock_waiter *w;
+ int count = 0;
+
+ /* plocks on owned resources are not replicated on other nodes */
+ if (r->owner == our_nodeid)
+ return;
+
+ pp = (struct pack_plock *) &section_buf;
+
+ list_for_each_entry(po, &r->locks, list) {
+ if (po->flags & P_SYNCING)
+ continue;
+ pp->start = cpu_to_le64(po->start);
+ pp->end = cpu_to_le64(po->end);
+ pp->owner = cpu_to_le64(po->owner);
+ pp->pid = cpu_to_le32(po->pid);
+ pp->nodeid = cpu_to_le32(po->nodeid);
+ pp->ex = po->ex;
+ pp->waiter = 0;
+ pp++;
+ count++;
+ }
+
+ list_for_each_entry(w, &r->waiters, list) {
+ if (w->flags & P_SYNCING)
+ continue;
+ pp->start = cpu_to_le64(w->info.start);
+ pp->end = cpu_to_le64(w->info.end);
+ pp->owner = cpu_to_le64(w->info.owner);
+ pp->pid = cpu_to_le32(w->info.pid);
+ pp->nodeid = cpu_to_le32(w->info.nodeid);
+ pp->ex = w->info.ex;
+ pp->waiter = 1;
+ pp++;
+ count++;
+ }
+
+ section_len = count * sizeof(struct pack_plock);
+}
+
+static int unpack_section_buf(struct mountgroup *mg, char *numbuf, int buflen)
+{
+ struct pack_plock *pp;
+ struct posix_lock *po;
+ struct lock_waiter *w;
+ struct resource *r;
+ int count = section_len / sizeof(struct pack_plock);
+ int i, owner = 0;
+ unsigned long long num;
+ struct timeval now;
+
+ gettimeofday(&now, NULL);
+
+ r = malloc(sizeof(struct resource));
+ if (!r)
+ return -ENOMEM;
+ memset(r, 0, sizeof(struct resource));
+ INIT_LIST_HEAD(&r->locks);
+ INIT_LIST_HEAD(&r->waiters);
+ INIT_LIST_HEAD(&r->pending);
+
+ if (config_plock_ownership)
+ sscanf(numbuf, "r%llu.%d", &num, &owner);
+ else
+ sscanf(numbuf, "r%llu", &num);
+
+ r->number = num;
+ r->owner = owner;
+ r->last_access = now;
+
+ pp = (struct pack_plock *) &section_buf;
+
+ for (i = 0; i < count; i++) {
+ if (!pp->waiter) {
+ po = malloc(sizeof(struct posix_lock));
po->start = le64_to_cpu(pp->start);
po->end = le64_to_cpu(pp->end);
po->owner = le64_to_cpu(pp->owner);
@@ -1208,6 +1809,19 @@
return _unlink_checkpoint(mg, &name);
}
+/*
+ * section id is r<inodenum>.<owner>, the maximum string length is:
+ * "r" prefix = 1 strlen("r")
+ * max uint64 = 20 strlen("18446744073709551615")
+ * "." before owner = 1 strlen(".")
+ * max int = 11 strlen("-2147483647")
+ * \0 at end = 1
+ * ---------------------
+ * 34 SECTION_NAME_LEN
+ */
+
+#define SECTION_NAME_LEN 34
+
/* Copy all plock state into a checkpoint so new node can retrieve it. The
node creating the ckpt for the mounter needs to be the same node that's
sending the mounter its journals message (i.e. the low nodeid). The new
@@ -1228,12 +1842,12 @@
SaCkptCheckpointOpenFlagsT flags;
SaNameT name;
SaAisErrorT rv;
- char buf[32];
+ char buf[SECTION_NAME_LEN];
struct resource *r;
struct posix_lock *po;
struct lock_waiter *w;
int r_count, lock_count, total_size, section_size, max_section_size;
- int len;
+ int len, owner;
if (!plocks_online)
return;
@@ -1264,6 +1878,9 @@
max_section_size = 0;
list_for_each_entry(r, &mg->resources, list) {
+ if (r->owner == -1)
+ continue;
+
r_count++;
section_size = 0;
list_for_each_entry(po, &r->locks, list) {
@@ -1290,9 +1907,7 @@
attr.retentionDuration = SA_TIME_MAX;
attr.maxSections = r_count + 1; /* don't know why we need +1 */
attr.maxSectionSize = max_section_size;
- attr.maxSectionIdSize = 22;
-
- /* 22 = 20 digits in max uint64 + "r" prefix + \0 suffix */
+ attr.maxSectionIdSize = SECTION_NAME_LEN;
flags = SA_CKPT_CHECKPOINT_READ |
SA_CKPT_CHECKPOINT_WRITE |
@@ -1318,15 +1933,49 @@
(unsigned long long)h);
mg->cp_handle = (uint64_t) h;
+ /* - If r owner is -1, ckpt nothing.
+ - If r owner is us, ckpt owner of us and no plocks.
+ - If r owner is other, ckpt that owner and any plocks we have on r
+ (they've just been synced but owner=0 msg not recved yet).
+ - If r owner is 0 and !got_unown, then we've just unowned r;
+ ckpt owner of us and any plocks that don't have SYNCING set
+ (plocks with SYNCING will be handled by our sync messages).
+ - If r owner is 0 and got_unown, then ckpt owner 0 and all plocks;
+ (there should be no SYNCING plocks) */
+
list_for_each_entry(r, &mg->resources, list) {
- memset(&buf, 0, 32);
- len = snprintf(buf, 32, "r%llu", (unsigned long long)r->number);
+ if (r->owner == -1)
+ continue;
+ else if (r->owner == our_nodeid)
+ owner = our_nodeid;
+ else if (r->owner)
+ owner = r->owner;
+ else if (!r->owner && !got_unown(r))
+ owner = our_nodeid;
+ else if (!r->owner)
+ owner = 0;
+ else {
+ log_error("store_plocks owner %d r %llx", r->owner,
+ (unsigned long long)r->number);
+ continue;
+ }
+
+ memset(&buf, 0, sizeof(buf));
+ if (config_plock_ownership)
+ len = snprintf(buf, SECTION_NAME_LEN, "r%llu.%d",
+ (unsigned long long)r->number, owner);
+ else
+ len = snprintf(buf, SECTION_NAME_LEN, "r%llu",
+ (unsigned long long)r->number);
section_id.id = (void *)buf;
section_id.idLen = len + 1;
section_attr.sectionId = &section_id;
section_attr.expirationTime = SA_TIME_END;
+ memset(&section_buf, 0, sizeof(section_buf));
+ section_len = 0;
+
pack_section_buf(mg, r);
log_group(mg, "store_plocks: section size %u id %u \"%s\"",
@@ -1377,7 +2026,7 @@
SaCkptIOVectorElementT iov;
SaNameT name;
SaAisErrorT rv;
- char buf[32];
+ char buf[SECTION_NAME_LEN];
int len;
if (!plocks_online)
@@ -1440,8 +2089,8 @@
iov.dataSize = desc.sectionSize;
iov.dataOffset = 0;
- memset(&buf, 0, 32);
- snprintf(buf, 32, "%s", desc.sectionId.id);
+ memset(&buf, 0, sizeof(buf));
+ snprintf(buf, SECTION_NAME_LEN, "%s", desc.sectionId.id);
log_group(mg, "retrieve_plocks: section size %llu id %u \"%s\"",
(unsigned long long)iov.dataSize, iov.sectionId.idLen,
buf);
@@ -1488,6 +2137,10 @@
saCkptCheckpointClose(h);
}
+/* Called when a node has failed, or we're unmounting. For a node failure, we
+ need to call this when the cpg confchg arrives so that we're guaranteed all
+ nodes do this in the same sequence wrt other messages. */
+
void purge_plocks(struct mountgroup *mg, int nodeid, int unmount)
{
struct posix_lock *po, *po2;
@@ -1512,11 +2165,23 @@
}
}
- if (list_empty(&r->locks) && list_empty(&r->waiters)) {
+ /* TODO: haven't thought carefully about how this transition
+ to owner 0 might interact with other owner messages in
+ progress. */
+
+ if (r->owner == nodeid) {
+ r->owner = 0;
+ send_pending_plocks(mg, r);
+ }
+
+ if (!list_empty(&r->waiters))
+ do_waiters(mg, r);
+
+ if (!config_plock_ownership &&
+ list_empty(&r->locks) && list_empty(&r->waiters)) {
list_del(&r->list);
free(r);
- } else
- do_waiters(mg, r);
+ }
}
if (purged)
@@ -1549,7 +2214,6 @@
return -1;
list_for_each_entry(r, &mg->resources, list) {
-
list_for_each_entry(po, &r->locks, list) {
snprintf(line, MAXLINE,
"%llu %s %llu-%llu nodeid %d pid %u owner %llx\n",
--- cluster/group/gfs_controld/recover.c 2007/09/04 19:22:52 1.33
+++ cluster/group/gfs_controld/recover.c 2007/11/28 20:49:08 1.34
@@ -19,7 +19,7 @@
extern char *clustername;
extern int our_nodeid;
extern group_handle_t gh;
-extern int no_withdraw;
+extern int config_no_withdraw;
extern int dmsetup_wait;
struct list_head mounts;
@@ -1328,8 +1328,6 @@
memb->spectator,
memb->wait_gfs_recover_done);
- purge_plocks(mg, memb->nodeid, 0);
-
if (mg->master_nodeid == memb->nodeid &&
memb->gone_type == GROUP_NODE_FAILED)
master_failed = 1;
@@ -2712,7 +2710,7 @@
char *name = strstr(table, ":") + 1;
int rv;
- if (no_withdraw) {
+ if (config_no_withdraw) {
log_error("withdraw feature not enabled");
return 0;
}
^ permalink raw reply [flat|nested] 4+ messages in thread
* [Cluster-devel] cluster/group/gfs_controld cpg.c lock_dlm.h ma ...
@ 2007-12-05 22:11 teigland
0 siblings, 0 replies; 4+ messages in thread
From: teigland @ 2007-12-05 22:11 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL5
Changes by: teigland at sourceware.org 2007-12-05 22:11:32
Modified files:
group/gfs_controld: cpg.c lock_dlm.h main.c plock.c recover.c
Log message:
bz 359271
A performance optimization for plocks. This speeds up locks that are
repeatedly accessed by processes on a single node. Plocks used by
processes on multiple nodes work the same way as before. The
optimization is disabled by default, and can be enabled by setting
<gfs_controld plock_ownership="1"/>
in cluster.conf, or by starting gfs_controld with "-o1". It is disabled
by default because enabling it breaks compatibility with previous
versions of gfs_controld. If all nodes in the cluster are running this
version, then plock_ownership can be enabled.
The plock_ownership mode needs extensive testing. This also introduces
some minor changes when plock_ownership is disabled, so new testing is
also required in that mode. Abhi and I worked on this together.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/cpg.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.9.2.3&r2=1.9.2.4
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.21.2.9&r2=1.21.2.10
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/main.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.18.2.13&r2=1.18.2.14
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.25.2.7&r2=1.25.2.8
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/recover.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.23.2.10&r2=1.23.2.11
--- cluster/group/gfs_controld/cpg.c 2007/07/19 20:23:16 1.9.2.3
+++ cluster/group/gfs_controld/cpg.c 2007/12/05 22:11:32 1.9.2.4
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2006 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -13,6 +13,8 @@
#include <openais/cpg.h>
#include "lock_dlm.h"
+extern struct list_head mounts;
+extern unsigned int protocol_active[3];
static cpg_handle_t daemon_handle;
static struct cpg_name daemon_name;
int message_flow_control_on;
@@ -21,6 +23,9 @@
void receive_options(struct mountgroup *mg, char *buf, int len, int from);
void receive_remount(struct mountgroup *mg, char *buf, int len, int from);
void receive_plock(struct mountgroup *mg, char *buf, int len, int from);
+void receive_own(struct mountgroup *mg, char *buf, int len, int from);
+void receive_drop(struct mountgroup *mg, char *buf, int len, int from);
+void receive_sync(struct mountgroup *mg, char *buf, int len, int from);
void receive_withdraw(struct mountgroup *mg, char *buf, int len, int from);
void receive_mount_status(struct mountgroup *mg, char *buf, int len, int from);
void receive_recovery_status(struct mountgroup *mg, char *buf, int len,
@@ -51,9 +56,14 @@
hd->nodeid = le32_to_cpu(hd->nodeid);
hd->to_nodeid = le32_to_cpu(hd->to_nodeid);
- if (hd->version[0] != GDLM_VER_MAJOR) {
- log_error("reject message version %u.%u.%u",
- hd->version[0], hd->version[1], hd->version[2]);
+ /* FIXME: we need to look at how to gracefully fail when we end up
+ with mixed incompat versions */
+
+ if (hd->version[0] != protocol_active[0]) {
+ log_error("reject message from %d version %u.%u.%u vs %u.%u.%u",
+ nodeid, hd->version[0], hd->version[1],
+ hd->version[2], protocol_active[0],
+ protocol_active[1], protocol_active[2]);
return;
}
@@ -100,6 +110,19 @@
receive_withdraw(mg, data, len, nodeid);
break;
+ case MSG_PLOCK_OWN:
+ receive_own(mg, data, len, nodeid);
+ break;
+
+ case MSG_PLOCK_DROP:
+ receive_drop(mg, data, len, nodeid);
+ break;
+
+ case MSG_PLOCK_SYNC_LOCK:
+ case MSG_PLOCK_SYNC_WAITER:
+ receive_sync(mg, data, len, nodeid);
+ break;
+
default:
log_error("unknown message type %d from %d",
hd->type, hd->nodeid);
@@ -112,11 +135,26 @@
do_deliver(nodeid, data, data_len);
}
+/* Not sure if purging plocks (driven by confchg) needs to be synchronized with
+ the other recovery steps (driven by libgroup) for a node, don't think so.
+ Is it possible for a node to have been cleared from the members_gone list
+ before this confchg is processed? */
+
void confchg_cb(cpg_handle_t handle, struct cpg_name *group_name,
struct cpg_address *member_list, int member_list_entries,
struct cpg_address *left_list, int left_list_entries,
struct cpg_address *joined_list, int joined_list_entries)
{
+ struct mountgroup *mg;
+ int i, nodeid;
+
+ for (i = 0; i < left_list_entries; i++) {
+ nodeid = left_list[i].nodeid;
+ list_for_each_entry(mg, &mounts, list) {
+ if (is_member(mg, nodeid) || is_removed(mg, nodeid))
+ purge_plocks(mg, left_list[i].nodeid, 0);
+ }
+ }
}
static cpg_callbacks_t callbacks = {
@@ -238,9 +276,9 @@
struct gdlm_header *hd = (struct gdlm_header *) buf;
int type = hd->type;
- hd->version[0] = cpu_to_le16(GDLM_VER_MAJOR);
- hd->version[1] = cpu_to_le16(GDLM_VER_MINOR);
- hd->version[2] = cpu_to_le16(GDLM_VER_PATCH);
+ hd->version[0] = cpu_to_le16(protocol_active[0]);
+ hd->version[1] = cpu_to_le16(protocol_active[1]);
+ hd->version[2] = cpu_to_le16(protocol_active[2]);
hd->type = cpu_to_le16(hd->type);
hd->nodeid = cpu_to_le32(hd->nodeid);
hd->to_nodeid = cpu_to_le32(hd->to_nodeid);
--- cluster/group/gfs_controld/lock_dlm.h 2007/11/21 17:50:15 1.21.2.9
+++ cluster/group/gfs_controld/lock_dlm.h 2007/12/05 22:11:32 1.21.2.10
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -168,6 +168,7 @@
uint64_t cp_handle;
time_t last_checkpoint_time;
time_t last_plock_time;
+ struct timeval drop_resources_last;
int needs_recovery;
int our_jid;
@@ -241,12 +242,12 @@
MSG_MOUNT_STATUS,
MSG_RECOVERY_STATUS,
MSG_RECOVERY_DONE,
+ MSG_PLOCK_OWN,
+ MSG_PLOCK_DROP,
+ MSG_PLOCK_SYNC_LOCK,
+ MSG_PLOCK_SYNC_WAITER,
};
-#define GDLM_VER_MAJOR 1
-#define GDLM_VER_MINOR 0
-#define GDLM_VER_PATCH 0
-
struct gdlm_header {
uint16_t version[3];
uint16_t type; /* MSG_ */
@@ -267,6 +268,9 @@
int do_write(int fd, void *buf, size_t count);
struct mountgroup *find_mg(char *name);
struct mountgroup *find_mg_id(uint32_t id);
+struct mg_member *find_memb_nodeid(struct mountgroup *mg, int nodeid);
+int is_member(struct mountgroup *mg, int nodeid);
+int is_removed(struct mountgroup *mg, int nodeid);
int setup_cman(void);
int process_cman(void);
--- cluster/group/gfs_controld/main.c 2007/10/26 20:34:40 1.18.2.13
+++ cluster/group/gfs_controld/main.c 2007/12/05 22:11:32 1.18.2.14
@@ -11,12 +11,29 @@
******************************************************************************/
#include "lock_dlm.h"
+#include "ccs.h"
-#define OPTION_STRING "DPhVwpl:"
+#define OPTION_STRING "DPhVwpl:o:t:c:a:"
#define LOCKFILE_NAME "/var/run/gfs_controld.pid"
+#define DEFAULT_NO_WITHDRAW 0 /* enable withdraw by default */
+#define DEFAULT_NO_PLOCK 0 /* enable plocks by default */
+
+/* max number of plock ops we will cpg-multicast per second */
#define DEFAULT_PLOCK_RATE_LIMIT 100
+/* disable ownership by default because it's a different protocol */
+#define DEFAULT_PLOCK_OWNERSHIP 0
+
+/* max frequency of drop attempts in ms */
+#define DEFAULT_DROP_RESOURCES_TIME 10000 /* 10 sec */
+
+/* max number of resources to drop per time period */
+#define DEFAULT_DROP_RESOURCES_COUNT 10
+
+/* resource not accessed for this many ms before subject to dropping */
+#define DEFAULT_DROP_RESOURCES_AGE 10000 /* 10 sec */
+
struct client {
int fd;
char type[32];
@@ -24,11 +41,41 @@
int another_mount;
};
+extern struct list_head mounts;
+extern struct list_head withdrawn_mounts;
+extern group_handle_t gh;
+
+int dmsetup_wait;
+
+/* cpg message protocol
+ 1.0.0 is initial version
+ 2.0.0 is incompatible with 1.0.0 and allows plock ownership */
+unsigned int protocol_v100[3] = {1, 0, 0};
+unsigned int protocol_v200[3] = {2, 0, 0};
+unsigned int protocol_active[3];
+
+/* user configurable */
+int config_no_withdraw;
+int config_no_plock;
+uint32_t config_plock_rate_limit;
+uint32_t config_plock_ownership;
+uint32_t config_drop_resources_time;
+uint32_t config_drop_resources_count;
+uint32_t config_drop_resources_age;
+
+/* command line settings override corresponding cluster.conf settings */
+static int opt_no_withdraw;
+static int opt_no_plock;
+static int opt_plock_rate_limit;
+static int opt_plock_ownership;
+static int opt_drop_resources_time;
+static int opt_drop_resources_count;
+static int opt_drop_resources_age;
+
static int client_maxi;
static int client_size = 0;
static struct client *client = NULL;
static struct pollfd *pollfd = NULL;
-
static int cman_fd;
static int cpg_fd;
static int listen_fd;
@@ -37,13 +84,6 @@
static int plocks_fd;
static int plocks_ci;
-extern struct list_head mounts;
-extern struct list_head withdrawn_mounts;
-extern group_handle_t gh;
-int no_withdraw;
-int no_plock;
-uint32_t plock_rate_limit = DEFAULT_PLOCK_RATE_LIMIT;
-int dmsetup_wait;
int do_read(int fd, void *buf, size_t count)
{
@@ -620,6 +660,85 @@
return rv;
}
+#define PLOCK_RATE_LIMIT_PATH "/cluster/gfs_controld/@plock_rate_limit"
+#define PLOCK_OWNERSHIP_PATH "/cluster/gfs_controld/@plock_ownership"
+#define DROP_RESOURCES_TIME_PATH "/cluster/gfs_controld/@drop_resources_time"
+#define DROP_RESOURCES_COUNT_PATH "/cluster/gfs_controld/@drop_resources_count"
+#define DROP_RESOURCES_AGE_PATH "/cluster/gfs_controld/@drop_resources_age"
+
+static void set_ccs_config(void)
+{
+ char path[PATH_MAX], *str;
+ int i = 0, cd, error;
+
+ while ((cd = ccs_connect()) < 0) {
+ sleep(1);
+ if (++i > 9 && !(i % 10))
+ log_error("connect to ccs error %d, "
+ "check ccsd or cluster status", cd);
+ }
+
+ memset(path, 0, PATH_MAX);
+ snprintf(path, PATH_MAX, "%s", PLOCK_RATE_LIMIT_PATH);
+ str = NULL;
+
+ error = ccs_get(cd, path, &str);
+ if (!error) {
+ if (!opt_plock_rate_limit)
+ config_plock_rate_limit = atoi(str);
+ }
+ if (str)
+ free(str);
+
+ memset(path, 0, PATH_MAX);
+ snprintf(path, PATH_MAX, "%s", PLOCK_OWNERSHIP_PATH);
+ str = NULL;
+
+ error = ccs_get(cd, path, &str);
+ if (!error) {
+ if (!opt_plock_ownership)
+ config_plock_ownership = atoi(str);
+ }
+ if (str)
+ free(str);
+
+ memset(path, 0, PATH_MAX);
+ snprintf(path, PATH_MAX, "%s", DROP_RESOURCES_TIME_PATH);
+ str = NULL;
+
+ error = ccs_get(cd, path, &str);
+ if (!error) {
+ if (!opt_drop_resources_time)
+ config_drop_resources_time = atoi(str);
+ }
+ if (str)
+ free(str);
+
+ memset(path, 0, PATH_MAX);
+ snprintf(path, PATH_MAX, "%s", DROP_RESOURCES_COUNT_PATH);
+ str = NULL;
+
+ error = ccs_get(cd, path, &str);
+ if (!error) {
+ if (!opt_drop_resources_count)
+ config_drop_resources_count = atoi(str);
+ }
+ if (str)
+ free(str);
+
+ memset(path, 0, PATH_MAX);
+ snprintf(path, PATH_MAX, "%s", DROP_RESOURCES_AGE_PATH);
+ str = NULL;
+
+ error = ccs_get(cd, path, &str);
+ if (!error) {
+ if (!opt_drop_resources_age)
+ config_drop_resources_age = atoi(str);
+ }
+ if (str)
+ free(str);
+}
+
static void lockfile(void)
{
int fd, error;
@@ -692,10 +811,18 @@
printf("\n");
printf(" -D Enable debugging code and don't fork\n");
printf(" -P Enable plock debugging\n");
+ printf(" -w Disable withdraw\n");
printf(" -p Disable plocks\n");
printf(" -l <limit> Limit the rate of plock operations\n");
printf(" Default is %d, set to 0 for no limit\n", DEFAULT_PLOCK_RATE_LIMIT);
- printf(" -w Disable withdraw\n");
+ printf(" -o <n> plock ownership, 1 enable, 0 disable\n");
+ printf(" Default is %d\n", DEFAULT_PLOCK_OWNERSHIP);
+ printf(" -t <ms> drop resources time (milliseconds)\n");
+ printf(" Default is %u\n", DEFAULT_DROP_RESOURCES_TIME);
+ printf(" -c <num> drop resources count\n");
+ printf(" Default is %u\n", DEFAULT_DROP_RESOURCES_COUNT);
+ printf(" -a <ms> drop resources age (milliseconds)\n");
+ printf(" Default is %u\n", DEFAULT_DROP_RESOURCES_AGE);
printf(" -h Print this help, then exit\n");
printf(" -V Print program version information, then exit\n");
}
@@ -710,10 +837,6 @@
switch (optchar) {
- case 'w':
- no_withdraw = 1;
- break;
-
case 'D':
daemon_debug_opt = 1;
break;
@@ -722,12 +845,39 @@
plock_debug_opt = 1;
break;
- case 'l':
- plock_rate_limit = atoi(optarg);
+ case 'w':
+ config_no_withdraw = 1;
+ opt_no_withdraw = 1;
break;
case 'p':
- no_plock = 1;
+ config_no_plock = 1;
+ opt_no_plock = 1;
+ break;
+
+ case 'l':
+ config_plock_rate_limit = atoi(optarg);
+ opt_plock_rate_limit = 1;
+ break;
+
+ case 'o':
+ config_plock_ownership = atoi(optarg);
+ opt_plock_ownership = 1;
+ break;
+
+ case 't':
+ config_drop_resources_time = atoi(optarg);
+ opt_drop_resources_time = 1;
+ break;
+
+ case 'c':
+ config_drop_resources_count = atoi(optarg);
+ opt_drop_resources_count = 1;
+ break;
+
+ case 'a':
+ config_drop_resources_age = atoi(optarg);
+ opt_drop_resources_age = 1;
break;
case 'h':
@@ -792,14 +942,41 @@
int main(int argc, char **argv)
{
prog_name = argv[0];
+
INIT_LIST_HEAD(&mounts);
INIT_LIST_HEAD(&withdrawn_mounts);
+ config_no_withdraw = DEFAULT_NO_WITHDRAW;
+ config_no_plock = DEFAULT_NO_PLOCK;
+ config_plock_rate_limit = DEFAULT_PLOCK_RATE_LIMIT;
+ config_plock_ownership = DEFAULT_PLOCK_OWNERSHIP;
+ config_drop_resources_time = DEFAULT_DROP_RESOURCES_TIME;
+ config_drop_resources_count = DEFAULT_DROP_RESOURCES_COUNT;
+ config_drop_resources_age = DEFAULT_DROP_RESOURCES_AGE;
+
decode_arguments(argc, argv);
if (!daemon_debug_opt)
daemonize();
+ /* ccs settings override the defaults, but not the command line */
+ set_ccs_config();
+
+ if (config_plock_ownership)
+ memcpy(protocol_active, protocol_v200, sizeof(protocol_v200));
+ else
+ memcpy(protocol_active, protocol_v100, sizeof(protocol_v100));
+
+ log_debug("config_no_withdraw %d", config_no_withdraw);
+ log_debug("config_no_plock %d", config_no_plock);
+ log_debug("config_plock_rate_limit %u", config_plock_rate_limit);
+ log_debug("config_plock_ownership %u", config_plock_ownership);
+ log_debug("config_drop_resources_time %u", config_drop_resources_time);
+ log_debug("config_drop_resources_count %u", config_drop_resources_count);
+ log_debug("config_drop_resources_age %u", config_drop_resources_age);
+ log_debug("protocol %u.%u.%u", protocol_active[0], protocol_active[1],
+ protocol_active[2]);
+
set_scheduler();
set_oom_adj(-16);
--- cluster/group/gfs_controld/plock.c 2007/07/19 20:23:16 1.25.2.7
+++ cluster/group/gfs_controld/plock.c 2007/12/05 22:11:32 1.25.2.8
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -44,20 +44,26 @@
#define CONTROL_DIR "/dev/misc"
#define CONTROL_NAME "lock_dlm_plock"
-static int control_fd = -1;
extern int our_nodeid;
-static int plocks_online = 0;
extern int message_flow_control_on;
-extern int no_plock;
-extern uint32_t plock_rate_limit;
-uint32_t plock_read_count;
-uint32_t plock_recv_count;
-uint32_t plock_rate_delays;
-struct timeval plock_read_time;
-struct timeval plock_recv_time;
-struct timeval plock_rate_last;
+/* user configurable */
+extern int config_no_plock;
+extern uint32_t config_plock_rate_limit;
+extern uint32_t config_plock_ownership;
+extern uint32_t config_drop_resources_time;
+extern uint32_t config_drop_resources_count;
+extern uint32_t config_drop_resources_age;
+
+static int plocks_online = 0;
+static uint32_t plock_read_count;
+static uint32_t plock_recv_count;
+static uint32_t plock_rate_delays;
+static struct timeval plock_read_time;
+static struct timeval plock_recv_time;
+static struct timeval plock_rate_last;
+static int control_fd = -1;
static SaCkptHandleT ckpt_handle;
static SaCkptCallbacksT callbacks = { 0, 0 };
static SaVersionT version = { 'B', 1, 1 };
@@ -76,13 +82,22 @@
uint32_t pad;
};
+#define R_GOT_UNOWN 0x00000001 /* have received owner=0 message */
+
struct resource {
struct list_head list; /* list of resources */
uint64_t number;
- struct list_head locks; /* one lock for each range */
+ int owner; /* nodeid or 0 for unowned */
+ uint32_t flags;
+ struct timeval last_access;
+ struct list_head locks; /* one lock for each range */
struct list_head waiters;
+ struct list_head pending; /* discovering r owner */
};
+#define P_SYNCING 0x00000001 /* plock has been sent as part of sync but not
+ yet received */
+
struct posix_lock {
struct list_head list; /* resource locks or waiters list */
uint32_t pid;
@@ -91,13 +106,26 @@
uint64_t end;
int ex;
int nodeid;
+ uint32_t flags;
};
struct lock_waiter {
struct list_head list;
+ uint32_t flags;
struct gdlm_plock_info info;
};
+
+static void send_own(struct mountgroup *mg, struct resource *r, int owner);
+static void save_pending_plock(struct mountgroup *mg, struct resource *r,
+ struct gdlm_plock_info *in);
+
+
+static int got_unown(struct resource *r)
+{
+ return !!(r->flags & R_GOT_UNOWN);
+}
+
static void info_bswap_out(struct gdlm_plock_info *i)
{
i->version[0] = cpu_to_le32(i->version[0]);
@@ -292,7 +320,7 @@
gettimeofday(&plock_recv_time, NULL);
gettimeofday(&plock_rate_last, NULL);
- if (no_plock)
+ if (config_no_plock)
goto control;
err = saCkptInitialize(&ckpt_handle, &callbacks, &version);
@@ -333,113 +361,6 @@
return dt;
}
-int process_plocks(void)
-{
- struct mountgroup *mg;
- struct gdlm_plock_info info;
- struct gdlm_header *hd;
- struct timeval now;
- char *buf;
- uint64_t usec;
- int len, rv;
-
- /* Don't send more messages while the cpg message queue is backed up */
-
- if (message_flow_control_on) {
- update_flow_control_status();
- if (message_flow_control_on)
- return -EBUSY;
- }
-
- /* Every N ops we check how long it's taken to do those N ops.
- If it's less than 1000 ms, we don't take any more. */
-
- if (plock_rate_limit && plock_read_count &&
- !(plock_read_count % plock_rate_limit)) {
- gettimeofday(&now, NULL);
- if (time_diff_ms(&plock_rate_last, &now) < 1000) {
- plock_rate_delays++;
- return -EBUSY;
- }
- plock_rate_last = now;
- }
-
- memset(&info, 0, sizeof(info));
-
- rv = do_read(control_fd, &info, sizeof(info));
- if (rv < 0) {
- log_debug("process_plocks: read error %d fd %d\n",
- errno, control_fd);
- return 0;
- }
-
- if (!plocks_online) {
- rv = -ENOSYS;
- goto fail;
- }
-
- mg = find_mg_id(info.fsid);
- if (!mg) {
- log_debug("process_plocks: no mg id %x", info.fsid);
- rv = -EEXIST;
- goto fail;
- }
-
- log_plock(mg, "read plock %llx %s %s %llx-%llx %d/%u/%llx w %d",
- (unsigned long long)info.number,
- op_str(info.optype),
- ex_str(info.optype, info.ex),
- (unsigned long long)info.start, (unsigned long long)info.end,
- info.nodeid, info.pid, (unsigned long long)info.owner,
- info.wait);
-
- /* report plock rate and any delays since the last report */
- plock_read_count++;
- if (!(plock_read_count % 1000)) {
- gettimeofday(&now, NULL);
- usec = dt_usec(&plock_read_time, &now) ;
- log_group(mg, "plock_read_count %u time %.3f s delays %u",
- plock_read_count, usec * 1.e-6, plock_rate_delays);
- plock_read_time = now;
- plock_rate_delays = 0;
- }
-
- len = sizeof(struct gdlm_header) + sizeof(struct gdlm_plock_info);
- buf = malloc(len);
- if (!buf) {
- rv = -ENOMEM;
- goto fail;
- }
- memset(buf, 0, len);
-
- info.nodeid = our_nodeid;
-
- hd = (struct gdlm_header *)buf;
- hd->type = MSG_PLOCK;
- hd->nodeid = our_nodeid;
- hd->to_nodeid = 0;
- memcpy(buf + sizeof(struct gdlm_header), &info, sizeof(info));
-
- info_bswap_out((struct gdlm_plock_info *) buf +
- sizeof(struct gdlm_header));
-
- rv = send_group_message(mg, len, buf);
-
- free(buf);
-
- if (rv) {
- log_error("send plock error %d", rv);
- goto fail;
- }
- return 0;
-
- fail:
- info.rv = rv;
- rv = write(control_fd, &info, sizeof(info));
-
- return 0;
-}
-
static struct resource *search_resource(struct mountgroup *mg, uint64_t number)
{
struct resource *r;
@@ -468,6 +389,7 @@
r = malloc(sizeof(struct resource));
if (!r) {
+ log_error("find_resource no memory %d", errno);
rv = -ENOMEM;
goto out;
}
@@ -476,15 +398,27 @@
r->number = number;
INIT_LIST_HEAD(&r->locks);
INIT_LIST_HEAD(&r->waiters);
+ INIT_LIST_HEAD(&r->pending);
+
+ if (config_plock_ownership)
+ r->owner = -1;
+ else
+ r->owner = 0;
list_add_tail(&r->list, &mg->resources);
out:
+ if (r)
+ gettimeofday(&r->last_access, NULL);
*r_out = r;
return rv;
}
static void put_resource(struct resource *r)
{
+ /* with ownership, resources are only freed via drop messages */
+ if (config_plock_ownership)
+ return;
+
if (list_empty(&r->locks) && list_empty(&r->waiters)) {
list_del(&r->list);
free(r);
@@ -825,6 +759,7 @@
{
struct lock_waiter *w;
+
w = malloc(sizeof(struct lock_waiter));
if (!w)
return -ENOMEM;
@@ -873,15 +808,11 @@
}
}
-static void do_lock(struct mountgroup *mg, struct gdlm_plock_info *in)
+static void do_lock(struct mountgroup *mg, struct gdlm_plock_info *in,
+ struct resource *r)
{
- struct resource *r = NULL;
int rv;
- rv = find_resource(mg, in->number, 1, &r);
- if (rv)
- goto out;
-
if (is_conflict(r, in, 0)) {
if (!in->wait)
rv = -EAGAIN;
@@ -902,41 +833,57 @@
put_resource(r);
}
-static void do_unlock(struct mountgroup *mg, struct gdlm_plock_info *in)
+static void do_unlock(struct mountgroup *mg, struct gdlm_plock_info *in,
+ struct resource *r)
{
- struct resource *r = NULL;
int rv;
- rv = find_resource(mg, in->number, 0, &r);
- if (!rv)
- rv = unlock_internal(mg, r, in);
+ rv = unlock_internal(mg, r, in);
if (in->nodeid == our_nodeid)
write_result(mg, in, rv);
- if (r) {
- do_waiters(mg, r);
- put_resource(r);
- }
+ do_waiters(mg, r);
+ put_resource(r);
}
-static void do_get(struct mountgroup *mg, struct gdlm_plock_info *in)
+/* we don't even get to this function if the getlk isn't from us */
+
+static void do_get(struct mountgroup *mg, struct gdlm_plock_info *in,
+ struct resource *r)
{
- struct resource *r = NULL;
int rv;
- rv = find_resource(mg, in->number, 0, &r);
- if (rv)
- goto out;
-
if (is_conflict(r, in, 1))
rv = 1;
else
rv = 0;
- out:
+
write_result(mg, in, rv);
}
+static void __receive_plock(struct mountgroup *mg, struct gdlm_plock_info *in,
+ int from, struct resource *r)
+{
+ switch (in->optype) {
+ case GDLM_PLOCK_OP_LOCK:
+ mg->last_plock_time = time(NULL);
+ do_lock(mg, in, r);
+ break;
+ case GDLM_PLOCK_OP_UNLOCK:
+ mg->last_plock_time = time(NULL);
+ do_unlock(mg, in, r);
+ break;
+ case GDLM_PLOCK_OP_GET:
+ do_get(mg, in, r);
+ break;
+ default:
+ log_error("receive_plock from %d optype %d", from, in->optype);
+ if (from == our_nodeid)
+ write_result(mg, in, -EINVAL);
+ }
+}
+
/* When mg members receive our options message (for our mount), one of them
saves all plock state received to that point in a checkpoint and then sends
us our journals message. We know to retrieve the plock state from the
@@ -947,16 +894,16 @@
set save_plocks (when we see our options message) can be ignored because it
should be reflected in the checkpointed state. */
-void _receive_plock(struct mountgroup *mg, char *buf, int len, int from)
+static void _receive_plock(struct mountgroup *mg, char *buf, int len, int from)
{
struct gdlm_plock_info info;
struct gdlm_header *hd = (struct gdlm_header *) buf;
+ struct resource *r = NULL;
struct timeval now;
uint64_t usec;
- int rv = 0;
+ int rv, create;
memcpy(&info, buf + sizeof(struct gdlm_header), sizeof(info));
-
info_bswap_in(&info);
log_plock(mg, "receive plock %llx %s %s %llx-%llx %d/%u/%llx w %d",
@@ -982,30 +929,89 @@
if (from != hd->nodeid || from != info.nodeid) {
log_error("receive_plock from %d header %d info %d",
from, hd->nodeid, info.nodeid);
- rv = -EINVAL;
- goto out;
+ return;
}
- switch (info.optype) {
- case GDLM_PLOCK_OP_LOCK:
- mg->last_plock_time = time(NULL);
- do_lock(mg, &info);
- break;
- case GDLM_PLOCK_OP_UNLOCK:
- mg->last_plock_time = time(NULL);
- do_unlock(mg, &info);
- break;
- case GDLM_PLOCK_OP_GET:
- do_get(mg, &info);
- break;
- default:
- log_error("receive_plock from %d optype %d", from, info.optype);
- rv = -EINVAL;
+ create = !config_plock_ownership;
+
+ rv = find_resource(mg, info.number, create, &r);
+
+ if (rv && config_plock_ownership) {
+ /* There must have been a race with a drop, so we need to
+ ignore this plock op which will be resent. If we're the one
+ who sent the plock, we need to send_own() and put it on the
+ pending list to resend once the owner is established. */
+
+ log_debug("receive_plock from %d no r %llx", from,
+ (unsigned long long)info.number);
+
+ if (from != our_nodeid)
+ return;
+
+ rv = find_resource(mg, info.number, 1, &r);
+ if (rv)
+ return;
+ send_own(mg, r, our_nodeid);
+ save_pending_plock(mg, r, &info);
+ return;
}
+ if (rv) {
+ /* r not found, rv is -ENOENT, this shouldn't happen because
+ process_plocks() creates a resource for every op */
- out:
- if (from == our_nodeid && rv)
- write_result(mg, &info, rv);
+ log_error("receive_plock from %d no r %llx %d", from,
+ (unsigned long long)info.number, rv);
+ return;
+ }
+
+ /* The owner should almost always be 0 here, but other owners may
+ be possible given odd combinations of races with drop. Odd races to
+ worry about (some seem pretty improbable):
+
+ - A sends drop, B sends plock, receive drop, receive plock.
+ This is addressed above.
+
+ - A sends drop, B sends plock, receive drop, B reads plock
+ and sends own, receive plock, on B we find owner of -1.
+
+ - A sends drop, B sends two plocks, receive drop, receive plocks.
+ Receiving the first plock is the previous case, receiving the
+ second plock will find r with owner of -1.
+
+ - A sends drop, B sends two plocks, receive drop, C sends own,
+ receive plock, B sends own, receive own (C), receive plock,
+ receive own (B).
+
+ Haven't tried to cook up a scenario that would lead to the
+ last case below; receiving a plock from ourself and finding
+ we're the owner of r. */
+
+ if (!r->owner) {
+ __receive_plock(mg, &info, from, r);
+
+ } else if (r->owner == -1) {
+ log_debug("receive_plock from %d r %llx owner %d", from,
+ (unsigned long long)info.number, r->owner);
+
+ if (from == our_nodeid)
+ save_pending_plock(mg, r, &info);
+
+ } else if (r->owner != our_nodeid) {
+ /* might happen, if frequent change to log_debug */
+ log_error("receive_plock from %d r %llx owner %d", from,
+ (unsigned long long)info.number, r->owner);
+
+ if (from == our_nodeid)
+ save_pending_plock(mg, r, &info);
+
+ } else if (r->owner == our_nodeid) {
+ /* might happen, if frequent change to log_debug */
+ log_error("receive_plock from %d r %llx owner %d", from,
+ (unsigned long long)info.number, r->owner);
+
+ if (from == our_nodeid)
+ __receive_plock(mg, &info, from, r);
+ }
}
void receive_plock(struct mountgroup *mg, char *buf, int len, int from)
@@ -1023,86 +1029,694 @@
_receive_plock(mg, buf, len, from);
}
-void process_saved_plocks(struct mountgroup *mg)
+static int send_struct_info(struct mountgroup *mg, struct gdlm_plock_info *in,
+ int msg_type)
{
- struct save_msg *sm, *sm2;
+ char *buf;
+ int rv, len;
+ struct gdlm_header *hd;
- if (list_empty(&mg->saved_messages))
- return;
+ len = sizeof(struct gdlm_header) + sizeof(struct gdlm_plock_info);
+ buf = malloc(len);
+ if (!buf) {
+ rv = -ENOMEM;
+ goto out;
+ }
+ memset(buf, 0, len);
- log_group(mg, "process_saved_plocks");
+ hd = (struct gdlm_header *)buf;
+ hd->type = msg_type;
+ hd->nodeid = our_nodeid;
+ hd->to_nodeid = 0;
- list_for_each_entry_safe(sm, sm2, &mg->saved_messages, list) {
- if (sm->type != MSG_PLOCK)
- continue;
- _receive_plock(mg, sm->buf, sm->len, sm->nodeid);
- list_del(&sm->list);
- free(sm);
- }
+ memcpy(buf + sizeof(struct gdlm_header), in, sizeof(*in));
+ info_bswap_out((struct gdlm_plock_info *) buf + sizeof(*hd));
+
+ rv = send_group_message(mg, len, buf);
+
+ free(buf);
+ out:
+ if (rv)
+ log_error("send plock message error %d", rv);
+ return rv;
}
-void plock_exit(void)
+static void send_plock(struct mountgroup *mg, struct resource *r,
+ struct gdlm_plock_info *in)
{
- if (plocks_online)
- saCkptFinalize(ckpt_handle);
+ send_struct_info(mg, in, MSG_PLOCK);
}
-void pack_section_buf(struct mountgroup *mg, struct resource *r)
+static void send_own(struct mountgroup *mg, struct resource *r, int owner)
{
- struct pack_plock *pp;
- struct posix_lock *po;
- struct lock_waiter *w;
- int count = 0;
-
- memset(&section_buf, 0, sizeof(section_buf));
+ struct gdlm_plock_info info;
- pp = (struct pack_plock *) &section_buf;
+ /* if we've already sent an own message for this resource,
+ (pending list is not empty), then we shouldn't send another */
- list_for_each_entry(po, &r->locks, list) {
- pp->start = cpu_to_le64(po->start);
- pp->end = cpu_to_le64(po->end);
- pp->owner = cpu_to_le64(po->owner);
- pp->pid = cpu_to_le32(po->pid);
- pp->nodeid = cpu_to_le32(po->nodeid);
- pp->ex = po->ex;
- pp->waiter = 0;
- pp++;
- count++;
+ if (!list_empty(&r->pending)) {
+ log_debug("send_own %llx already pending",
+ (unsigned long long)r->number);
+ return;
}
- list_for_each_entry(w, &r->waiters, list) {
- pp->start = cpu_to_le64(w->info.start);
- pp->end = cpu_to_le64(w->info.end);
- pp->owner = cpu_to_le64(w->info.owner);
- pp->pid = cpu_to_le32(w->info.pid);
- pp->nodeid = cpu_to_le32(w->info.nodeid);
- pp->ex = w->info.ex;
- pp->waiter = 1;
- pp++;
- count++;
- }
+ memset(&info, 0, sizeof(info));
+ info.number = r->number;
+ info.nodeid = owner;
- section_len = count * sizeof(struct pack_plock);
+ send_struct_info(mg, &info, MSG_PLOCK_OWN);
}
-int unpack_section_buf(struct mountgroup *mg, char *numbuf, int buflen)
+static void send_syncs(struct mountgroup *mg, struct resource *r)
{
- struct pack_plock *pp;
+ struct gdlm_plock_info info;
struct posix_lock *po;
struct lock_waiter *w;
- struct resource *r;
- int count = section_len / sizeof(struct pack_plock);
- int i;
- unsigned long long num;
+ int rv;
- r = malloc(sizeof(struct resource));
+ list_for_each_entry(po, &r->locks, list) {
+ memset(&info, 0, sizeof(info));
+ info.number = r->number;
+ info.start = po->start;
+ info.end = po->end;
+ info.nodeid = po->nodeid;
+ info.owner = po->owner;
+ info.pid = po->pid;
+ info.ex = po->ex;
+
+ rv = send_struct_info(mg, &info, MSG_PLOCK_SYNC_LOCK);
+ if (rv)
+ goto out;
+
+ po->flags |= P_SYNCING;
+ }
+
+ list_for_each_entry(w, &r->waiters, list) {
+ memcpy(&info, &w->info, sizeof(info));
+
+ rv = send_struct_info(mg, &info, MSG_PLOCK_SYNC_WAITER);
+ if (rv)
+ goto out;
+
+ w->flags |= P_SYNCING;
+ }
+ out:
+ return;
+}
+
+static void send_drop(struct mountgroup *mg, struct resource *r)
+{
+ struct gdlm_plock_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.number = r->number;
+
+ send_struct_info(mg, &info, MSG_PLOCK_DROP);
+}
+
+/* plock op can't be handled until we know the owner value of the resource,
+ so the op is saved on the pending list until the r owner is established */
+
+static void save_pending_plock(struct mountgroup *mg, struct resource *r,
+ struct gdlm_plock_info *in)
+{
+ struct lock_waiter *w;
+
+ w = malloc(sizeof(struct lock_waiter));
+ if (!w) {
+ log_error("save_pending_plock no mem");
+ return;
+ }
+ memcpy(&w->info, in, sizeof(struct gdlm_plock_info));
+ list_add_tail(&w->list, &r->pending);
+}
+
+/* plock ops are on pending list waiting for ownership to be established.
+ owner has now become us, so add these plocks to r */
+
+static void add_pending_plocks(struct mountgroup *mg, struct resource *r)
+{
+ struct lock_waiter *w, *safe;
+
+ list_for_each_entry_safe(w, safe, &r->pending, list) {
+ __receive_plock(mg, &w->info, our_nodeid, r);
+ list_del(&w->list);
+ free(w);
+ }
+}
+
+/* plock ops are on pending list waiting for ownership to be established.
+ owner has now become 0, so send these plocks to everyone */
+
+static void send_pending_plocks(struct mountgroup *mg, struct resource *r)
+{
+ struct lock_waiter *w, *safe;
+
+ list_for_each_entry_safe(w, safe, &r->pending, list) {
+ send_plock(mg, r, &w->info);
+ list_del(&w->list);
+ free(w);
+ }
+}
+
+static void _receive_own(struct mountgroup *mg, char *buf, int len, int from)
+{
+ struct gdlm_header *hd = (struct gdlm_header *) buf;
+ struct gdlm_plock_info info;
+ struct resource *r;
+ int should_not_happen = 0;
+ int rv;
+
+ memcpy(&info, buf + sizeof(struct gdlm_header), sizeof(info));
+ info_bswap_in(&info);
+
+ log_plock(mg, "receive own %llx from %u owner %u",
+ (unsigned long long)info.number, hd->nodeid, info.nodeid);
+
+ rv = find_resource(mg, info.number, 1, &r);
+ if (rv)
+ return;
+
+ if (from == our_nodeid) {
+ /*
+ * received our own own message
+ */
+
+ if (info.nodeid == 0) {
+ /* we are setting owner to 0 */
+
+ if (r->owner == our_nodeid) {
+ /* we set owner to 0 when we relinquish
+ ownership */
+ should_not_happen = 1;
+ } else if (r->owner == 0) {
+ /* this happens when we relinquish ownership */
+ r->flags |= R_GOT_UNOWN;
+ } else {
+ should_not_happen = 1;
+ }
+
+ } else if (info.nodeid == our_nodeid) {
+ /* we are setting owner to ourself */
+
+ if (r->owner == -1) {
+ /* we have gained ownership */
+ r->owner = our_nodeid;
+ add_pending_plocks(mg, r);
+ } else if (r->owner == our_nodeid) {
+ should_not_happen = 1;
+ } else if (r->owner == 0) {
+ send_pending_plocks(mg, r);
+ } else {
+ /* resource is owned by other node;
+ they should set owner to 0 shortly */
+ }
+
+ } else {
+ /* we should only ever set owner to 0 or ourself */
+ should_not_happen = 1;
+ }
+ } else {
+ /*
+ * received own message from another node
+ */
+
+ if (info.nodeid == 0) {
+ /* other node is setting owner to 0 */
+
+ if (r->owner == -1) {
+ /* we should have a record of the owner before
+ it relinquishes */
+ should_not_happen = 1;
+ } else if (r->owner == our_nodeid) {
+ /* only the owner should relinquish */
+ should_not_happen = 1;
+ } else if (r->owner == 0) {
+ should_not_happen = 1;
+ } else {
+ r->owner = 0;
+ r->flags |= R_GOT_UNOWN;
+ send_pending_plocks(mg, r);
+ }
+
+ } else if (info.nodeid == from) {
+ /* other node is setting owner to itself */
+
+ if (r->owner == -1) {
+ /* normal path for a node becoming owner */
+ r->owner = from;
+ } else if (r->owner == our_nodeid) {
+ /* we relinquish our ownership: sync our local
+ plocks to everyone, then set owner to 0 */
+ send_syncs(mg, r);
+ send_own(mg, r, 0);
+ /* we need to set owner to 0 here because
+ local ops may arrive before we receive
+ our send_own message and can't be added
+ locally */
+ r->owner = 0;
+ } else if (r->owner == 0) {
+ /* can happen because we set owner to 0 before
+ we receive our send_own sent just above */
+ } else {
+ /* do nothing, current owner should be
+ relinquishing its ownership */
+ }
+
+ } else if (info.nodeid == our_nodeid) {
+ /* no one else should try to set the owner to us */
+ should_not_happen = 1;
+ } else {
+ /* a node should only ever set owner to 0 or itself */
+ should_not_happen = 1;
+ }
+ }
+
+ if (should_not_happen) {
+ log_error("receive_own from %u %llx info nodeid %d r owner %d",
+ from, (unsigned long long)r->number, info.nodeid,
+ r->owner);
+ }
+}
+
+void receive_own(struct mountgroup *mg, char *buf, int len, int from)
+{
+ if (mg->save_plocks) {
+ save_message(mg, buf, len, from, MSG_PLOCK_OWN);
+ return;
+ }
+
+ _receive_own(mg, buf, len, from);
+}
+
+static void clear_syncing_flag(struct resource *r, struct gdlm_plock_info *in)
+{
+ struct posix_lock *po;
+ struct lock_waiter *w;
+
+ list_for_each_entry(po, &r->locks, list) {
+ if ((po->flags & P_SYNCING) &&
+ in->start == po->start &&
+ in->end == po->end &&
+ in->nodeid == po->nodeid &&
+ in->owner == po->owner &&
+ in->pid == po->pid &&
+ in->ex == po->ex) {
+ po->flags &= ~P_SYNCING;
+ return;
+ }
+ }
+
+ list_for_each_entry(w, &r->waiters, list) {
+ if ((w->flags & P_SYNCING) &&
+ in->start == w->info.start &&
+ in->end == w->info.end &&
+ in->nodeid == w->info.nodeid &&
+ in->owner == w->info.owner &&
+ in->pid == w->info.pid &&
+ in->ex == w->info.ex) {
+ w->flags &= ~P_SYNCING;
+ return;
+ }
+ }
+
+ log_error("clear_syncing %llx no match %s %llx-%llx %d/%u/%llx",
+ (unsigned long long)r->number, in->ex ? "WR" : "RD",
+ (unsigned long long)in->start, (unsigned long long)in->end,
+ in->nodeid, in->pid, (unsigned long long)in->owner);
+}
+
+static void _receive_sync(struct mountgroup *mg, char *buf, int len, int from)
+{
+ struct gdlm_plock_info info;
+ struct gdlm_header *hd = (struct gdlm_header *) buf;
+ struct resource *r;
+ int rv;
+
+ memcpy(&info, buf + sizeof(struct gdlm_header), sizeof(info));
+ info_bswap_in(&info);
+
+ log_plock(mg, "receive sync %llx from %u %s %llx-%llx %d/%u/%llx",
+ (unsigned long long)info.number, from, info.ex ? "WR" : "RD",
+ (unsigned long long)info.start, (unsigned long long)info.end,
+ info.nodeid, info.pid, (unsigned long long)info.owner);
+
+ rv = find_resource(mg, info.number, 0, &r);
+ if (rv) {
+ log_error("receive_sync no r %llx from %d", info.number, from);
+ return;
+ }
+
+ if (from == our_nodeid) {
+ /* this plock now in sync on all nodes */
+ clear_syncing_flag(r, &info);
+ return;
+ }
+
+ if (hd->type == MSG_PLOCK_SYNC_LOCK)
+ add_lock(r, info.nodeid, info.owner, info.pid, !info.ex,
+ info.start, info.end);
+ else if (hd->type == MSG_PLOCK_SYNC_WAITER)
+ add_waiter(mg, r, &info);
+}
+
+void receive_sync(struct mountgroup *mg, char *buf, int len, int from)
+{
+ struct gdlm_header *hd = (struct gdlm_header *) buf;
+
+ if (mg->save_plocks) {
+ save_message(mg, buf, len, from, hd->type);
+ return;
+ }
+
+ _receive_sync(mg, buf, len, from);
+}
+
+static void _receive_drop(struct mountgroup *mg, char *buf, int len, int from)
+{
+ struct gdlm_plock_info info;
+ struct resource *r;
+ int rv;
+
+ memcpy(&info, buf + sizeof(struct gdlm_header), sizeof(info));
+ info_bswap_in(&info);
+
+ log_plock(mg, "receive drop %llx from %u",
+ (unsigned long long)info.number, from);
+
+ rv = find_resource(mg, info.number, 0, &r);
+ if (rv) {
+ /* we'll find no r if two nodes sent drop at once */
+ log_debug("receive_drop from %d no r %llx", from,
+ (unsigned long long)info.number);
+ return;
+ }
+
+ if (r->owner != 0) {
+ /* - A sent drop, B sent drop, receive drop A, C sent own,
+ receive drop B (this warning on C, owner -1)
+ - A sent drop, B sent drop, receive drop A, A sent own,
+ receive own A, receive drop B (this warning on all,
+ owner A) */
+ log_debug("receive_drop from %d r %llx owner %d", from,
+ (unsigned long long)r->number, r->owner);
+ return;
+ }
+
+ if (!list_empty(&r->pending)) {
+ /* shouldn't happen */
+ log_error("receive_drop from %d r %llx pending op", from,
+ (unsigned long long)r->number);
+ return;
+ }
+
+ /* the decision to drop or not must be based on things that are
+ guaranteed to be the same on all nodes */
+
+ if (list_empty(&r->locks) && list_empty(&r->waiters)) {
+ list_del(&r->list);
+ free(r);
+ } else {
+ /* A sent drop, B sent a plock, receive plock, receive drop */
+ log_debug("receive_drop from %d r %llx in use", from,
+ (unsigned long long)r->number);
+ }
+}
+
+void receive_drop(struct mountgroup *mg, char *buf, int len, int from)
+{
+ if (mg->save_plocks) {
+ save_message(mg, buf, len, from, MSG_PLOCK_DROP);
+ return;
+ }
+
+ _receive_drop(mg, buf, len, from);
+}
+
+/* We only drop resources from the unowned state to simplify things.
+ If we want to drop a resource we own, we unown/relinquish it first. */
+
+/* FIXME: in the transition from owner = us, to owner = 0, to drop;
+ we want the second period to be shorter than the first */
+
+static int drop_resources(struct mountgroup *mg)
+{
+ struct resource *r;
+ struct timeval now;
+ int count = 0;
+
+ gettimeofday(&now, NULL);
+
+ /* try to drop the oldest, unused resources */
+
+ list_for_each_entry_reverse(r, &mg->resources, list) {
+ if (count >= config_drop_resources_count)
+ break;
+ if (r->owner && r->owner != our_nodeid)
+ continue;
+ if (time_diff_ms(&r->last_access, &now) <
+ config_drop_resources_age)
+ continue;
+
+ if (list_empty(&r->locks) && list_empty(&r->waiters)) {
+ if (r->owner == our_nodeid) {
+ send_own(mg, r, 0);
+ r->owner = 0;
+ } else if (r->owner == 0 && got_unown(r)) {
+ send_drop(mg, r);
+ }
+
+ count++;
+ }
+ }
+
+ return 0;
+}
+
+int process_plocks(void)
+{
+ struct mountgroup *mg;
+ struct resource *r;
+ struct gdlm_plock_info info;
+ struct timeval now;
+ uint64_t usec;
+ int rv;
+
+ /* Don't send more messages while the cpg message queue is backed up */
+
+ if (message_flow_control_on) {
+ update_flow_control_status();
+ if (message_flow_control_on)
+ return -EBUSY;
+ }
+
+ gettimeofday(&now, NULL);
+
+ /* Every N ops we check how long it's taken to do those N ops.
+ If it's less than 1000 ms, we don't take any more. */
+
+ if (config_plock_rate_limit && plock_read_count &&
+ !(plock_read_count % config_plock_rate_limit)) {
+ if (time_diff_ms(&plock_rate_last, &now) < 1000) {
+ plock_rate_delays++;
+ return -EBUSY;
+ }
+ plock_rate_last = now;
+ }
+
+ memset(&info, 0, sizeof(info));
+
+ rv = do_read(control_fd, &info, sizeof(info));
+ if (rv < 0) {
+ log_debug("process_plocks: read error %d fd %d\n",
+ errno, control_fd);
+ return 0;
+ }
+
+ /* kernel doesn't set the nodeid field */
+ info.nodeid = our_nodeid;
+
+ if (!plocks_online) {
+ rv = -ENOSYS;
+ goto fail;
+ }
+
+ mg = find_mg_id(info.fsid);
+ if (!mg) {
+ log_debug("process_plocks: no mg id %x", info.fsid);
+ rv = -EEXIST;
+ goto fail;
+ }
+
+ log_plock(mg, "read plock %llx %s %s %llx-%llx %d/%u/%llx w %d",
+ (unsigned long long)info.number,
+ op_str(info.optype),
+ ex_str(info.optype, info.ex),
+ (unsigned long long)info.start, (unsigned long long)info.end,
+ info.nodeid, info.pid, (unsigned long long)info.owner,
+ info.wait);
+
+ /* report plock rate and any delays since the last report */
+ plock_read_count++;
+ if (!(plock_read_count % 1000)) {
+ usec = dt_usec(&plock_read_time, &now) ;
+ log_group(mg, "plock_read_count %u time %.3f s delays %u",
+ plock_read_count, usec * 1.e-6, plock_rate_delays);
+ plock_read_time = now;
+ plock_rate_delays = 0;
+ }
+
+ rv = find_resource(mg, info.number, 1, &r);
+ if (rv)
+ goto fail;
+
+ if (r->owner == 0) {
+ /* plock state replicated on all nodes */
+ send_plock(mg, r, &info);
+
+ } else if (r->owner == our_nodeid) {
+ /* we are the owner of r, so our plocks are local */
+ __receive_plock(mg, &info, our_nodeid, r);
+
+ } else {
+ /* r owner is -1: r is new, try to become the owner;
+ r owner > 0: tell other owner to give up ownership;
+ both done with a message trying to set owner to ourself */
+ send_own(mg, r, our_nodeid);
+ save_pending_plock(mg, r, &info);
+ }
+
+ if (config_plock_ownership &&
+ time_diff_ms(&mg->drop_resources_last, &now) >=
+ config_drop_resources_time) {
+ mg->drop_resources_last = now;
+ drop_resources(mg);
+ }
+
+ return 0;
+
+ fail:
+ info.rv = rv;
+ rv = write(control_fd, &info, sizeof(info));
+
+ return 0;
+}
+
+void process_saved_plocks(struct mountgroup *mg)
+{
+ struct save_msg *sm, *sm2;
+
+ if (list_empty(&mg->saved_messages))
+ return;
+
+ log_group(mg, "process_saved_plocks");
+
+ list_for_each_entry_safe(sm, sm2, &mg->saved_messages, list) {
+ switch (sm->type) {
+ case MSG_PLOCK:
+ _receive_plock(mg, sm->buf, sm->len, sm->nodeid);
+ break;
+ case MSG_PLOCK_OWN:
+ _receive_own(mg, sm->buf, sm->len, sm->nodeid);
+ break;
+ case MSG_PLOCK_DROP:
+ _receive_drop(mg, sm->buf, sm->len, sm->nodeid);
+ break;
+ case MSG_PLOCK_SYNC_LOCK:
+ case MSG_PLOCK_SYNC_WAITER:
+ _receive_sync(mg, sm->buf, sm->len, sm->nodeid);
+ break;
+ default:
+ continue;
+ }
+
+ list_del(&sm->list);
+ free(sm);
+ }
+}
+
+void plock_exit(void)
+{
+ if (plocks_online)
+ saCkptFinalize(ckpt_handle);
+}
+
+/* locks still marked SYNCING should not go into the ckpt; the new node
+ will get those locks by receiving PLOCK_SYNC messages */
+
+static void pack_section_buf(struct mountgroup *mg, struct resource *r)
+{
+ struct pack_plock *pp;
+ struct posix_lock *po;
+ struct lock_waiter *w;
+ int count = 0;
+
+ /* plocks on owned resources are not replicated on other nodes */
+ if (r->owner == our_nodeid)
+ return;
+
+	pp = (struct pack_plock *) &section_buf;
+
+ list_for_each_entry(po, &r->locks, list) {
+ if (po->flags & P_SYNCING)
+ continue;
+ pp->start = cpu_to_le64(po->start);
+ pp->end = cpu_to_le64(po->end);
+ pp->owner = cpu_to_le64(po->owner);
+ pp->pid = cpu_to_le32(po->pid);
+ pp->nodeid = cpu_to_le32(po->nodeid);
+ pp->ex = po->ex;
+ pp->waiter = 0;
+ pp++;
+ count++;
+ }
+
+ list_for_each_entry(w, &r->waiters, list) {
+ if (w->flags & P_SYNCING)
+ continue;
+ pp->start = cpu_to_le64(w->info.start);
+ pp->end = cpu_to_le64(w->info.end);
+ pp->owner = cpu_to_le64(w->info.owner);
+ pp->pid = cpu_to_le32(w->info.pid);
+ pp->nodeid = cpu_to_le32(w->info.nodeid);
+ pp->ex = w->info.ex;
+ pp->waiter = 1;
+ pp++;
+ count++;
+ }
+
+ section_len = count * sizeof(struct pack_plock);
+}
+
+static int unpack_section_buf(struct mountgroup *mg, char *numbuf, int buflen)
+{
+ struct pack_plock *pp;
+ struct posix_lock *po;
+ struct lock_waiter *w;
+ struct resource *r;
+ int count = section_len / sizeof(struct pack_plock);
+ int i, owner = 0;
+ unsigned long long num;
+ struct timeval now;
+
+ gettimeofday(&now, NULL);
+
+ r = malloc(sizeof(struct resource));
if (!r)
return -ENOMEM;
memset(r, 0, sizeof(struct resource));
INIT_LIST_HEAD(&r->locks);
INIT_LIST_HEAD(&r->waiters);
- sscanf(numbuf, "r%llu", &num);
+ INIT_LIST_HEAD(&r->pending);
+
+ if (config_plock_ownership)
+ sscanf(numbuf, "r%llu.%d", &num, &owner);
+ else
+ sscanf(numbuf, "r%llu", &num);
+
r->number = num;
+ r->owner = owner;
+ r->last_access = now;
	pp = (struct pack_plock *) &section_buf;
@@ -1208,6 +1822,19 @@
return _unlink_checkpoint(mg, &name);
}
+/*
+ * section id is r<inodenum>.<owner>, the maximum string length is:
+ * "r" prefix = 1 strlen("r")
+ * max uint64 = 20 strlen("18446744073709551615")
+ * "." before owner = 1 strlen(".")
+ * max int = 11 strlen("-2147483647")
+ * \0 at end = 1
+ * ---------------------
+ * 34 SECTION_NAME_LEN
+ */
+
+#define SECTION_NAME_LEN 34
+
/* Copy all plock state into a checkpoint so new node can retrieve it. The
node creating the ckpt for the mounter needs to be the same node that's
sending the mounter its journals message (i.e. the low nodeid). The new
@@ -1228,12 +1855,12 @@
SaCkptCheckpointOpenFlagsT flags;
SaNameT name;
SaAisErrorT rv;
- char buf[32];
+ char buf[SECTION_NAME_LEN];
struct resource *r;
struct posix_lock *po;
struct lock_waiter *w;
int r_count, lock_count, total_size, section_size, max_section_size;
- int len;
+ int len, owner;
if (!plocks_online)
return;
@@ -1264,6 +1891,9 @@
max_section_size = 0;
list_for_each_entry(r, &mg->resources, list) {
+ if (r->owner == -1)
+ continue;
+
r_count++;
section_size = 0;
list_for_each_entry(po, &r->locks, list) {
@@ -1290,9 +1920,7 @@
attr.retentionDuration = SA_TIME_MAX;
attr.maxSections = r_count + 1; /* don't know why we need +1 */
attr.maxSectionSize = max_section_size;
- attr.maxSectionIdSize = 22;
-
- /* 22 = 20 digits in max uint64 + "r" prefix + \0 suffix */
+ attr.maxSectionIdSize = SECTION_NAME_LEN;
flags = SA_CKPT_CHECKPOINT_READ |
SA_CKPT_CHECKPOINT_WRITE |
@@ -1318,15 +1946,49 @@
(unsigned long long)h);
mg->cp_handle = (uint64_t) h;
+ /* - If r owner is -1, ckpt nothing.
+ - If r owner is us, ckpt owner of us and no plocks.
+ - If r owner is other, ckpt that owner and any plocks we have on r
+ (they've just been synced but owner=0 msg not recved yet).
+ - If r owner is 0 and !got_unown, then we've just unowned r;
+ ckpt owner of us and any plocks that don't have SYNCING set
+ (plocks with SYNCING will be handled by our sync messages).
+ - If r owner is 0 and got_unown, then ckpt owner 0 and all plocks;
+ (there should be no SYNCING plocks) */
+
list_for_each_entry(r, &mg->resources, list) {
- memset(&buf, 0, 32);
- len = snprintf(buf, 32, "r%llu", (unsigned long long)r->number);
+ if (r->owner == -1)
+ continue;
+ else if (r->owner == our_nodeid)
+ owner = our_nodeid;
+ else if (r->owner)
+ owner = r->owner;
+ else if (!r->owner && !got_unown(r))
+ owner = our_nodeid;
+ else if (!r->owner)
+ owner = 0;
+ else {
+ log_error("store_plocks owner %d r %llx", r->owner,
+ (unsigned long long)r->number);
+ continue;
+ }
+
+ memset(&buf, 0, sizeof(buf));
+ if (config_plock_ownership)
+ len = snprintf(buf, SECTION_NAME_LEN, "r%llu.%d",
+ (unsigned long long)r->number, owner);
+ else
+ len = snprintf(buf, SECTION_NAME_LEN, "r%llu",
+ (unsigned long long)r->number);
section_id.id = (void *)buf;
section_id.idLen = len + 1;
		section_attr.sectionId = &section_id;
section_attr.expirationTime = SA_TIME_END;
+		memset(&section_buf, 0, sizeof(section_buf));
+ section_len = 0;
+
pack_section_buf(mg, r);
log_group(mg, "store_plocks: section size %u id %u \"%s\"",
@@ -1377,7 +2039,7 @@
SaCkptIOVectorElementT iov;
SaNameT name;
SaAisErrorT rv;
- char buf[32];
+ char buf[SECTION_NAME_LEN];
int len;
if (!plocks_online)
@@ -1440,8 +2102,8 @@
iov.dataSize = desc.sectionSize;
iov.dataOffset = 0;
- memset(&buf, 0, 32);
- snprintf(buf, 32, "%s", desc.sectionId.id);
+ memset(&buf, 0, sizeof(buf));
+ snprintf(buf, SECTION_NAME_LEN, "%s", desc.sectionId.id);
log_group(mg, "retrieve_plocks: section size %llu id %u \"%s\"",
(unsigned long long)iov.dataSize, iov.sectionId.idLen,
buf);
@@ -1488,6 +2150,10 @@
saCkptCheckpointClose(h);
}
+/* Called when a node has failed, or we're unmounting. For a node failure, we
+ need to call this when the cpg confchg arrives so that we're guaranteed all
+ nodes do this in the same sequence wrt other messages. */
+
void purge_plocks(struct mountgroup *mg, int nodeid, int unmount)
{
struct posix_lock *po, *po2;
@@ -1512,11 +2178,23 @@
}
}
- if (list_empty(&r->locks) && list_empty(&r->waiters)) {
+ /* TODO: haven't thought carefully about how this transition
+ to owner 0 might interact with other owner messages in
+ progress. */
+
+ if (r->owner == nodeid) {
+ r->owner = 0;
+ send_pending_plocks(mg, r);
+ }
+
+ if (!list_empty(&r->waiters))
+ do_waiters(mg, r);
+
+ if (!config_plock_ownership &&
+ list_empty(&r->locks) && list_empty(&r->waiters)) {
list_del(&r->list);
free(r);
- } else
- do_waiters(mg, r);
+ }
}
if (purged)
@@ -1549,7 +2227,6 @@
return -1;
list_for_each_entry(r, &mg->resources, list) {
-
list_for_each_entry(po, &r->locks, list) {
snprintf(line, MAXLINE,
"%llu %s %llu-%llu nodeid %d pid %u owner %llx\n",
--- cluster/group/gfs_controld/recover.c 2007/09/04 19:27:34 1.23.2.10
+++ cluster/group/gfs_controld/recover.c 2007/12/05 22:11:32 1.23.2.11
@@ -19,7 +19,7 @@
extern char *clustername;
extern int our_nodeid;
extern group_handle_t gh;
-extern int no_withdraw;
+extern int config_no_withdraw;
extern int dmsetup_wait;
struct list_head mounts;
@@ -1328,8 +1328,6 @@
memb->spectator,
memb->wait_gfs_recover_done);
- purge_plocks(mg, memb->nodeid, 0);
-
if (mg->master_nodeid == memb->nodeid &&
memb->gone_type == GROUP_NODE_FAILED)
master_failed = 1;
@@ -2712,7 +2710,7 @@
char *name = strstr(table, ":") + 1;
int rv;
- if (no_withdraw) {
+ if (config_no_withdraw) {
log_error("withdraw feature not enabled");
return 0;
}
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2007-12-05 22:11 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2007-11-28 20:49 [Cluster-devel] cluster/group/gfs_controld cpg.c lock_dlm.h ma teigland
-- strict thread matches above, loose matches on Subject: below --
2007-12-05 22:11 teigland
2006-08-09 19:35 teigland
2006-06-15 15:27 teigland
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).