cluster-devel.redhat.com archive mirror
 help / color / mirror / Atom feed
* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_inode.c
@ 2007-07-11 22:15 rpeterso
  0 siblings, 0 replies; 9+ messages in thread
From: rpeterso @ 2007-07-11 22:15 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL51
Changes by:	rpeterso at sourceware.org	2007-07-11 22:15:22

Modified files:
	gfs-kernel/src/gfs: ops_inode.c 

Log message:
	Resolves: bz #241096: GFS: bug in gfs truncate

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_inode.c.diff?cvsroot=cluster&only_with_tag=RHEL51&r1=1.15.2.2&r2=1.15.2.2.2.1

--- cluster/gfs-kernel/src/gfs/ops_inode.c	2007/06/05 17:46:38	1.15.2.2
+++ cluster/gfs-kernel/src/gfs/ops_inode.c	2007/07/11 22:15:22	1.15.2.2.2.1
@@ -1393,8 +1393,11 @@
 		}
 
 		error = gfs_truncatei(ip, attr->ia_size, gfs_truncator_page);
-		if (error)
+		if (error) {
+			if (inode->i_size != ip->i_di.di_size)
+				i_size_write(inode, ip->i_di.di_size);
 			goto fail;
+		}
 
 		if ((sdp->sd_vfs->s_flags & MS_SYNCHRONOUS) &&
 		    !gfs_is_jdata(ip))



^ permalink raw reply	[flat|nested] 9+ messages in thread
* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_inode.c
@ 2007-07-11 21:58 rpeterso
  0 siblings, 0 replies; 9+ messages in thread
From: rpeterso @ 2007-07-11 21:58 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	rpeterso at sourceware.org	2007-07-11 21:58:53

Modified files:
	gfs-kernel/src/gfs: ops_inode.c 

Log message:
	Resolves: bz #241096: GFS: bug in gfs truncate

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_inode.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.15.2.2&r2=1.15.2.3

--- cluster/gfs-kernel/src/gfs/ops_inode.c	2007/06/05 17:46:38	1.15.2.2
+++ cluster/gfs-kernel/src/gfs/ops_inode.c	2007/07/11 21:58:53	1.15.2.3
@@ -1393,8 +1393,11 @@
 		}
 
 		error = gfs_truncatei(ip, attr->ia_size, gfs_truncator_page);
-		if (error)
+		if (error) {
+			if (inode->i_size != ip->i_di.di_size)
+				i_size_write(inode, ip->i_di.di_size);
 			goto fail;
+		}
 
 		if ((sdp->sd_vfs->s_flags & MS_SYNCHRONOUS) &&
 		    !gfs_is_jdata(ip))



^ permalink raw reply	[flat|nested] 9+ messages in thread
* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_inode.c
@ 2007-06-29 21:57 rpeterso
  0 siblings, 0 replies; 9+ messages in thread
From: rpeterso @ 2007-06-29 21:57 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	rpeterso at sourceware.org	2007-06-29 21:57:23

Modified files:
	gfs-kernel/src/gfs: ops_inode.c 

Log message:
	Resolves: bz 241096: GFS: bug in gfs truncate

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_inode.c.diff?cvsroot=cluster&r1=1.18&r2=1.19

--- cluster/gfs-kernel/src/gfs/ops_inode.c	2007/06/05 18:21:22	1.18
+++ cluster/gfs-kernel/src/gfs/ops_inode.c	2007/06/29 21:57:23	1.19
@@ -1393,8 +1393,11 @@
 		}
 
 		error = gfs_truncatei(ip, attr->ia_size, gfs_truncator_page);
-		if (error)
+		if (error) {
+			if (inode->i_size != ip->i_di.di_size)
+				i_size_write(inode, ip->i_di.di_size);
 			goto fail;
+		}
 
 		if ((sdp->sd_vfs->s_flags & MS_SYNCHRONOUS) &&
 		    !gfs_is_jdata(ip))



^ permalink raw reply	[flat|nested] 9+ messages in thread
* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_inode.c
@ 2007-06-05 18:21 wcheng
  0 siblings, 0 replies; 9+ messages in thread
From: wcheng @ 2007-06-05 18:21 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	wcheng at sourceware.org	2007-06-05 18:21:23

Modified files:
	gfs-kernel/src/gfs: ops_inode.c 

Log message:
	Bugzilla 242759:
	
	Bumped into this problem while debugging bug #236565 (GFS SPECsfs panic).
	Apparently a minor oversight while adding a new function into GFS for
	RHEL5. GFS versions <= RHEL4 are immune to this issue.
	
	Upon memory pressure, VM starts to release inode cache entries that would
	fail gfs iget. GFS1 flags this error as "ENOMEM" but returns from gfs_create
	call without releasing the glock.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_inode.c.diff?cvsroot=cluster&r1=1.17&r2=1.18

--- cluster/gfs-kernel/src/gfs/ops_inode.c	2007/06/05 18:15:51	1.17
+++ cluster/gfs-kernel/src/gfs/ops_inode.c	2007/06/05 18:21:22	1.18
@@ -151,9 +151,9 @@
 	gfs_inode_put(ip);
 
 	if (!inode)
-		return -ENOMEM;
-
-	error = gfs_security_init(dip, ip);
+		error = -ENOMEM;
+	else
+		error = gfs_security_init(dip, ip);
 
 	gfs_glock_dq_uninit(&d_gh);
 	gfs_glock_dq_uninit(&i_gh);



^ permalink raw reply	[flat|nested] 9+ messages in thread
* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_inode.c
@ 2007-06-05 17:46 wcheng
  0 siblings, 0 replies; 9+ messages in thread
From: wcheng @ 2007-06-05 17:46 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	wcheng at sourceware.org	2007-06-05 17:46:38

Modified files:
	gfs-kernel/src/gfs: ops_inode.c 

Log message:
	Bugzilla 242759
	
	Upon memory pressure, vm could start to release inode cache entries.
	Any process trying to get an inode during this time period will fail.
	GFS flags the error as "ENOMEM" but leaves gfs_create() call without
	unlocking the associated directory and file glocks. This patch fixes
	this minor coding oversight.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_inode.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.15.2.1&r2=1.15.2.2

--- cluster/gfs-kernel/src/gfs/ops_inode.c	2007/06/05 05:43:14	1.15.2.1
+++ cluster/gfs-kernel/src/gfs/ops_inode.c	2007/06/05 17:46:38	1.15.2.2
@@ -151,9 +151,9 @@
 	gfs_inode_put(ip);
 
 	if (!inode)
-		return -ENOMEM;
-
-	error = gfs_security_init(dip, ip);
+		error = -ENOMEM;
+	else
+		error = gfs_security_init(dip, ip);
 
 	gfs_glock_dq_uninit(&d_gh);
 	gfs_glock_dq_uninit(&i_gh);



^ permalink raw reply	[flat|nested] 9+ messages in thread
* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_inode.c
@ 2007-01-18 20:40 wcheng
  0 siblings, 0 replies; 9+ messages in thread
From: wcheng @ 2007-01-18 20:40 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL4
Changes by:	wcheng at sourceware.org	2007-01-18 20:40:12

Modified files:
	gfs-kernel/src/gfs: ops_inode.c 

Log message:
	bugzilla 190475 (rename 3-2):
	
	This patch fixes:
	Current rename doesn't lock the (to be deleted) source file during
	rename operation unless the source is a directory. This issue would
	not show up in a single node case since VFS layer has done its i_sem/
	i_mutex locks for both directories and involved files.
	
	This patch excludes a previous proposed change (in the draft patch sent
	out for review in cluster-devel list) that adds gfs rename global lock
	to prevent a rare case of cluster deadlock. Since cluster rename global
	lock is very expensive, we prefer to find other means to fix that issue.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_inode.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.6.2.4&r2=1.6.2.5

--- cluster/gfs-kernel/src/gfs/ops_inode.c	2006/10/10 18:35:27	1.6.2.4
+++ cluster/gfs-kernel/src/gfs/ops_inode.c	2007/01/18 20:40:12	1.6.2.5
@@ -968,15 +968,19 @@
 			goto fail;
 	}
 
-	gfs_holder_init(odip->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[0]);
-	gfs_holder_init(ndip->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[1]);
-	num_gh = 2;
-
-	if (nip)
-		gfs_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[num_gh++]);
+	num_gh = 1;
+	gfs_holder_init(odip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
+	if (odip != ndip) {
+		gfs_holder_init(ndip->i_gl, LM_ST_EXCLUSIVE, 0, ghs+num_gh);
+		num_gh++;
+	}
+	gfs_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs+num_gh);
+	num_gh++;
 
-	if (dir_rename)
-		gfs_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[num_gh++]);
+	if (nip) {
+		gfs_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
+		num_gh++;
+	}
 
 	error = gfs_glock_nq_m(num_gh, ghs);
 	if (error)



^ permalink raw reply	[flat|nested] 9+ messages in thread
* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_inode.c
@ 2006-10-23 20:47 bmarzins
  0 siblings, 0 replies; 9+ messages in thread
From: bmarzins @ 2006-10-23 20:47 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	bmarzins at sourceware.org	2006-10-23 20:47:23

Modified files:
	gfs-kernel/src/gfs: ops_inode.c 

Log message:
	Really gross hack!!!
	This is a workaround for one of the bugs that got lumped into 166701. It
	breaks POSIX behavior in a corner case to avoid crashing... It's icky.
	
	when NFS opens a file with O_CREAT, the kernel nfs daemon checks to see
	if the file exists. If it does, nfsd does the *right thing* (either opens the
	file, or if the file was opened with O_EXCL, returns an error).  If the file
	doesn't exist, it passes the request down to the underlying file system.
	Unfortunately, since nfs *knows* that the file doesn't exist, it doesn't
	bother to pass a nameidata structure, which would include the intent
	information. However since gfs is a cluster file system, the file could have
	been created on another node after nfs checks for it. If this is the case,
	gfs needs the intent information to do the *right thing*.  It panics when
	it finds a NULL pointer instead of the nameidata. Now, instead of panicking
	when gfs finds a NULL nameidata pointer, it assumes that the file was not
	created with O_EXCL.
	
	This assumption could be wrong, with the result that an application could
	think that it has created a new file, when in fact, it has opened an existing
	one.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_inode.c.diff?cvsroot=cluster&r1=1.14&r2=1.15

--- cluster/gfs-kernel/src/gfs/ops_inode.c	2006/10/15 07:25:09	1.14
+++ cluster/gfs-kernel/src/gfs/ops_inode.c	2006/10/23 20:47:23	1.15
@@ -118,7 +118,7 @@
 		if (!error)
 			break;
 		else if (error != -EEXIST ||
-			 (nd->intent.open.flags & O_EXCL)) {
+			 (nd && (nd->intent.open.flags & O_EXCL))) {
 			gfs_holder_uninit(&d_gh);
 			return error;
 		}



^ permalink raw reply	[flat|nested] 9+ messages in thread
* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_inode.c
@ 2006-10-15  7:25 wcheng
  0 siblings, 0 replies; 9+ messages in thread
From: wcheng @ 2006-10-15  7:25 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	wcheng at sourceware.org	2006-10-15 07:25:14

Modified files:
	gfs-kernel/src/gfs: ops_inode.c 

Log message:
	Just found the 2.6.18 kernel has something called down_read_non_owner for
	rw_semaphore. If we can implement a similar function that does something
	like "up_write_if_owner", then we can put i_alloc_sem back to the correct
	state. Correct the comment and mark this possibility.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_inode.c.diff?cvsroot=cluster&r1=1.13&r2=1.14

--- cluster/gfs-kernel/src/gfs/ops_inode.c	2006/10/15 06:32:06	1.13
+++ cluster/gfs-kernel/src/gfs/ops_inode.c	2006/10/15 07:25:09	1.14
@@ -1349,14 +1349,13 @@
 	 * To avoid this to happen, i_alloc_sem must be dropped and trust
 	 * be put into glock that it can carry the same protection. 
 	 *
-	 * One issue with dropping i_alloc_sem is gfs_setattr() can be 
-	 * called from other code path without this sempaphore. Since linux
-	 * semaphore implementation doesn't include owner id, we have no way 
-	 * to reliably decide whether the following "up" is a correct reset. 
-	 * This implies if i_alloc_sem is ever used by non-direct_IO code 
-	 * path in the future, this hack will fall apart. In short, with this 
-	 * change, i_alloc_sem has become a meaningless lock within GFS and 
-	 * don't expect its counter representing any correct state. 
+	 * One issue with dropping i_alloc_sem is that the gfs_setattr() 
+	 * can be invoked from other code path without this sempaphore. 
+	 * We'll need a new rwsem function that can "up" the semaphore 
+	 * only when it is needed. Before that happens (will research the 
+	 * possibility), i_alloc_sem (now) is a meaningless lock within 
+	 * GFS. If it is ever been used by other non-directIO code, this
+	 * hack will fall apart.
 	 *
 	 * wcheng at redhat.com 10/14/06  
 	 */ 



^ permalink raw reply	[flat|nested] 9+ messages in thread
* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_inode.c
@ 2006-10-03 17:27 rohara
  0 siblings, 0 replies; 9+ messages in thread
From: rohara @ 2006-10-03 17:27 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	rohara at sourceware.org	2006-10-03 17:27:34

Modified files:
	gfs-kernel/src/gfs: ops_inode.c 

Log message:
	Added gfs_security_init to initialize SELinux xattrs for newly created
	inodes.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_inode.c.diff?cvsroot=cluster&r1=1.11&r2=1.12

--- cluster/gfs-kernel/src/gfs/ops_inode.c	2006/07/10 23:22:34	1.11
+++ cluster/gfs-kernel/src/gfs/ops_inode.c	2006/10/03 17:27:34	1.12
@@ -24,6 +24,7 @@
 #include <linux/mm.h>
 #include <linux/xattr.h>
 #include <linux/posix_acl.h>
+#include <linux/security.h>
 
 #include "gfs.h"
 #include "acl.h"
@@ -43,6 +44,48 @@
 #include "unlinked.h"
 
 /**
+ * gfs_security_init -
+ * @dip:
+ * @ip:
+ *
+ * Returns: errno
+ */
+
+static int
+gfs_security_init(struct gfs_inode *dip, struct gfs_inode *ip)
+{
+	int err;
+	size_t len;
+	void *value;
+	char *name;
+	struct gfs_ea_request er;
+
+	err = security_inode_init_security(ip->i_vnode, dip->i_vnode,
+					   &name, &value, &len);
+
+	if (err) {
+		if (err == -EOPNOTSUPP)
+			return 0;
+		return err;
+	}
+
+	memset(&er, 0, sizeof(struct gfs_ea_request));
+
+	er.er_type = GFS_EATYPE_SECURITY;
+	er.er_name = name;
+	er.er_data = value;
+	er.er_name_len = strlen(name);
+	er.er_data_len = len;
+
+	err = gfs_ea_set_i(ip, &er);
+
+	kfree(value);
+	kfree(name);
+
+	return err;
+}
+
+/**
  * gfs_create - Create a file
  * @dir: The directory in which to create the file
  * @dentry: The dentry of the new file
@@ -104,15 +147,20 @@
 		gfs_alloc_put(dip);
 	}
 
-	gfs_glock_dq_uninit(&d_gh);
-	gfs_glock_dq_uninit(&i_gh);
-
 	inode = gfs_iget(ip, CREATE);
 	gfs_inode_put(ip);
 
 	if (!inode)
 		return -ENOMEM;
 
+	error = gfs_security_init(dip, ip);
+
+	gfs_glock_dq_uninit(&d_gh);
+	gfs_glock_dq_uninit(&i_gh);
+
+	if (error)
+		return error;
+
 	d_instantiate(dentry, inode);
 	if (new)
 		mark_inode_dirty(inode);
@@ -595,11 +643,16 @@
 	gfs_unlinked_unlock(sdp, dip->i_alloc->al_ul);
 	gfs_alloc_put(dip);
 
+	inode = gfs_iget(ip, CREATE);
+	gfs_inode_put(ip);
+
+	error = gfs_security_init(dip, ip);
+
 	gfs_glock_dq_uninit(&d_gh);
 	gfs_glock_dq_uninit(&i_gh);
 
-	inode = gfs_iget(ip, CREATE);
-	gfs_inode_put(ip);
+	if (error)
+		return error;
 
 	if (!inode)
 		return -ENOMEM;
@@ -689,15 +742,20 @@
 	gfs_unlinked_unlock(sdp, dip->i_alloc->al_ul);
 	gfs_alloc_put(dip);
 
-	gfs_glock_dq_uninit(&d_gh);
-	gfs_glock_dq_uninit(&i_gh);
-
 	inode = gfs_iget(ip, CREATE);
 	gfs_inode_put(ip);
 
 	if (!inode)
 		return -ENOMEM;
 
+	error = gfs_security_init(dip, ip);
+
+	gfs_glock_dq_uninit(&d_gh);
+	gfs_glock_dq_uninit(&i_gh);
+
+	if (error)
+		return error;
+
 	d_instantiate(dentry, inode);
 	mark_inode_dirty(inode);
 
@@ -861,11 +919,16 @@
 	gfs_unlinked_unlock(sdp, dip->i_alloc->al_ul);
 	gfs_alloc_put(dip);
 
+	inode = gfs_iget(ip, CREATE);
+	gfs_inode_put(ip);
+
+	error = gfs_security_init(dip, ip);
+
 	gfs_glock_dq_uninit(&d_gh);
 	gfs_glock_dq_uninit(&i_gh);
 
-	inode = gfs_iget(ip, CREATE);
-	gfs_inode_put(ip);
+	if (error)
+		return error;
 
 	if (!inode)
 		return -ENOMEM;



^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2007-07-11 22:15 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2007-07-11 22:15 [Cluster-devel] cluster/gfs-kernel/src/gfs ops_inode.c rpeterso
  -- strict thread matches above, loose matches on Subject: below --
2007-07-11 21:58 rpeterso
2007-06-29 21:57 rpeterso
2007-06-05 18:21 wcheng
2007-06-05 17:46 wcheng
2007-01-18 20:40 wcheng
2006-10-23 20:47 bmarzins
2006-10-15  7:25 wcheng
2006-10-03 17:27 rohara

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).