* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2008-01-30 9:05 fabbione
0 siblings, 0 replies; 15+ messages in thread
From: fabbione @ 2008-01-30 9:05 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: fabbione at sourceware.org 2008-01-30 09:05:57
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
Whitespace cleanup
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&r1=1.39&r2=1.40
--- cluster/gfs-kernel/src/gfs/ops_file.c 2008/01/30 06:37:53 1.39
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2008/01/30 09:05:57 1.40
@@ -439,16 +439,16 @@
/*
* gfs_aio_read: match with vfs generic_file_aio_read as:
- * (struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
+ * (struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
*/
static ssize_t
gfs_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long count,
- loff_t pos)
+ loff_t pos)
{
- struct file *filp = iocb->ki_filp;
+ struct file *filp = iocb->ki_filp;
- BUG_ON(iocb->ki_pos != pos);
- return(__gfs_read(filp, iov->iov_base, iov->iov_len, &iocb->ki_pos, iocb));
+ BUG_ON(iocb->ki_pos != pos);
+ return(__gfs_read(filp, iov->iov_base, iov->iov_len, &iocb->ki_pos, iocb));
}
/**
@@ -534,12 +534,12 @@
struct kiocb *iocb)
{
struct inode *inode = file->f_mapping->host;
- struct gfs_inode *ip = get_v2ip(inode);
- struct gfs_sbd *sdp = ip->i_sbd;
- struct gfs_alloc *al = NULL;
- struct buffer_head *dibh;
- unsigned int data_blocks, ind_blocks;
- ssize_t count;
+ struct gfs_inode *ip = get_v2ip(inode);
+ struct gfs_sbd *sdp = ip->i_sbd;
+ struct gfs_alloc *al = NULL;
+ struct buffer_head *dibh;
+ unsigned int data_blocks, ind_blocks;
+ ssize_t count;
int error;
gfs_write_calc_reserv(ip, size, &data_blocks, &ind_blocks);
@@ -584,13 +584,13 @@
}
if (gfs_is_stuffed(ip)) { error = gfs_unstuff_dinode(ip, gfs_unstuffer_sync, NULL); if (error)
- goto fail_end_trans;
- }
+ goto fail_end_trans;
+ }
- count = gfs_file_aio_write_nolock(file, buf, size, offset, iocb);
- if (count < 0) {
- error = count;
- goto fail_end_trans;
+ count = gfs_file_aio_write_nolock(file, buf, size, offset, iocb);
+ if (count < 0) {
+ error = count;
+ goto fail_end_trans;
}
error = gfs_get_inode_buffer(ip, &dibh);
@@ -748,23 +748,23 @@
buf += error;
size -= error;
- count += error;
- }
- } else {
- struct gfs_holder t_gh;
+ count += error;
+ }
+ } else {
+ struct gfs_holder t_gh;
- clear_bit(GFF_DID_DIRECT_ALLOC, &fp->f_flags);
+ clear_bit(GFF_DID_DIRECT_ALLOC, &fp->f_flags);
error = gfs_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
if (error)
goto out_gunlock;
- /* Todo: It would be nice if init_sync_kiocb is exported.
- * .. wcheng
- */
- count = gfs_file_aio_write_nolock(file, buf, size, offset, iocb);
- gfs_glock_dq_uninit(&t_gh);
- }
+ /* Todo: It would be nice if init_sync_kiocb is exported.
+ * .. wcheng
+ */
+ count = gfs_file_aio_write_nolock(file, buf, size, offset, iocb);
+ gfs_glock_dq_uninit(&t_gh);
+ }
out_iocb_write:
error = 0;
@@ -882,13 +882,13 @@
ClearPageUptodate(page);
page_cache_release(page);
}
- }
- *offset += count;
- } else {
- count = gfs_file_aio_write_nolock(file, buf, size, offset, iocb);
- if (count < 0) {
- error = count;
- goto fail_end_trans;
+ }
+ *offset += count;
+ } else {
+ count = gfs_file_aio_write_nolock(file, buf, size, offset, iocb);
+ if (count < 0) {
+ error = count;
+ goto fail_end_trans;
}
error = gfs_get_inode_buffer(ip, &dibh);
@@ -1062,14 +1062,14 @@
static ssize_t
gfs_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long segs,
- loff_t pos)
+ loff_t pos)
{
- struct file *file = iocb->ki_filp;
+ struct file *file = iocb->ki_filp;
- BUG_ON(iocb->ki_pos != pos);
+ BUG_ON(iocb->ki_pos != pos);
- return(__gfs_write(file, iov->iov_base, iov->iov_len, &iocb->ki_pos,
- iocb));
+ return(__gfs_write(file, iov->iov_base, iov->iov_len, &iocb->ki_pos,
+ iocb));
}
/**
@@ -1388,7 +1388,7 @@
static long
gfs_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
- struct gfs_inode *ip = get_v2ip(file->f_mapping->host);
+ struct gfs_inode *ip = get_v2ip(file->f_mapping->host);
atomic_inc(&ip->i_sbd->sd_ops_file);
@@ -1400,11 +1400,11 @@
return 0;
}
- case GFS_IOCTL_SUPER:
- return gfs_ioctl_i_compat(ip, arg);
+ case GFS_IOCTL_SUPER:
+ return gfs_ioctl_i_compat(ip, arg);
- default:
- return -ENOTTY;
+ default:
+ return -ENOTTY;
}
}
#endif
@@ -1613,15 +1613,15 @@
if (!(fl->fl_flags & FL_POSIX))
return -ENOLCK;
- if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
- return -ENOLCK;
+ if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ return -ENOLCK;
- if (IS_GETLK(cmd))
- return gfs_lm_plock_get(sdp, &name, file, fl);
- else if (fl->fl_type == F_UNLCK)
+ if (IS_GETLK(cmd))
+ return gfs_lm_plock_get(sdp, &name, file, fl);
+ else if (fl->fl_type == F_UNLCK)
return gfs_lm_punlock(sdp, &name, file, fl);
else
- return gfs_lm_plock(sdp, &name, file, cmd, fl);
+ return gfs_lm_plock(sdp, &name, file, cmd, fl);
}
/**
@@ -1661,7 +1661,7 @@
out:
gfs_holder_uninit(&gh);
- return retval;
+ return retval;
}
/**
@@ -1756,18 +1756,18 @@
static int
gfs_flock(struct file *file, int cmd, struct file_lock *fl)
{
- struct gfs_inode *ip = get_v2ip(file->f_mapping->host);
+ struct gfs_inode *ip = get_v2ip(file->f_mapping->host);
- atomic_inc(&ip->i_sbd->sd_ops_file);
+ atomic_inc(&ip->i_sbd->sd_ops_file);
if (!(fl->fl_flags & FL_FLOCK))
return -ENOLCK;
- if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
- return -ENOLCK;
+ if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ return -ENOLCK;
- if (fl->fl_type == F_UNLCK) {
- do_unflock(file, fl);
- return 0;
+ if (fl->fl_type == F_UNLCK) {
+ do_unflock(file, fl);
+ return 0;
} else
return do_flock(file, cmd, fl);
}
@@ -1776,32 +1776,32 @@
.llseek = gfs_llseek,
.read = gfs_read,
.write = gfs_write,
- .aio_read = gfs_aio_read,
- .aio_write = gfs_aio_write,
+ .aio_read = gfs_aio_read,
+ .aio_write = gfs_aio_write,
.ioctl = gfs_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = gfs_compat_ioctl,
+ .compat_ioctl = gfs_compat_ioctl,
#endif
.mmap = gfs_mmap,
.open = gfs_open,
- .release = gfs_close,
- .fsync = gfs_fsync,
- .lock = gfs_lock,
- .splice_read = gfs_splice_read,
- .flock = gfs_flock,
+ .release = gfs_close,
+ .fsync = gfs_fsync,
+ .lock = gfs_lock,
+ .splice_read = gfs_splice_read,
+ .flock = gfs_flock,
};
struct file_operations gfs_dir_fops = {
.readdir = gfs_readdir,
.ioctl = gfs_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = gfs_compat_ioctl,
+ .compat_ioctl = gfs_compat_ioctl,
#endif
.open = gfs_open,
.release = gfs_close,
.fsync = gfs_fsync,
- .lock = gfs_lock,
- .flock = gfs_flock,
+ .lock = gfs_lock,
+ .flock = gfs_flock,
};
struct file_operations gfs_file_fops_nolock = {
^ permalink raw reply [flat|nested] 15+ messages in thread* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2008-01-28 6:36 fabbione
0 siblings, 0 replies; 15+ messages in thread
From: fabbione @ 2008-01-28 6:36 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: fabbione at sourceware.org 2008-01-28 06:36:18
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
fix gfs for the removal of sendfile and helper functions
Sendfile and helper functions have been removed in 2.6.24. Migrate
to using splice_read and generic_file_splice_read helper function.
Signed-off-by: Phillip Lougher <phillip@canonical.com>
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&r1=1.37&r2=1.38
--- cluster/gfs-kernel/src/gfs/ops_file.c 2008/01/24 20:54:31 1.37
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2008/01/28 06:36:18 1.38
@@ -1633,13 +1633,12 @@
return gfs_lm_plock(sdp, &name, file, cmd, fl);
}
-#if 0
/**
- * gfs_sendfile - Send bytes to a file or socket
+ * gfs_splice_read - Send bytes to a file or socket
* @in_file: The file to read from
* @out_file: The file to write to
* @count: The amount of data
- * @offset: The beginning file offset
+ * @ppos: The beginning file offset
*
* Outputs: offset - updated according to number of bytes read
*
@@ -1647,7 +1646,7 @@
*/
static ssize_t
-gfs_sendfile(struct file *in_file, loff_t *offset, size_t count, read_actor_t actor, void __user *target)
+gfs_splice_read(struct file *in_file, loff_t *ppos, struct pipe_inode_info *pipe, size_t count, unsigned int flags)
{
struct gfs_inode *ip = get_v2ip(in_file->f_mapping->host);
struct gfs_holder gh;
@@ -1664,7 +1663,7 @@
if (gfs_is_jdata(ip))
retval = -ENOSYS;
else
- retval = generic_file_sendfile(in_file, offset, count, actor, target);
+ retval = generic_file_splice_read(in_file, ppos, pipe, count, flags);
gfs_glock_dq(&gh);
@@ -1673,7 +1672,6 @@
return retval;
}
-#endif
/**
* do_flock - Acquire a flock on a file
@@ -1802,7 +1800,7 @@
.release = gfs_close,
.fsync = gfs_fsync,
.lock = gfs_lock,
- /* .sendfile = gfs_sendfile, */
+ .splice_read = gfs_splice_read,
.flock = gfs_flock,
};
^ permalink raw reply [flat|nested] 15+ messages in thread* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2007-06-17 3:35 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2007-06-17 3:35 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: wcheng at sourceware.org 2007-06-17 03:35:03
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
bugzilla 244343:
Backport RHEL4 gfs datasync patch to head.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&r1=1.34&r2=1.35
--- cluster/gfs-kernel/src/gfs/ops_file.c 2007/06/06 15:11:54 1.34
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2007/06/17 03:35:02 1.35
@@ -24,6 +24,7 @@
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/aio.h>
+#include <linux/writeback.h>
#include <asm/uaccess.h>
#include "gfs_ioctl.h"
@@ -1528,6 +1529,7 @@
{
struct gfs_inode *ip = get_v2ip(dentry->d_inode);
struct gfs_holder i_gh;
+ struct inode *inode = dentry->d_inode;
int error;
atomic_inc(&ip->i_sbd->sd_ops_file);
@@ -1538,8 +1540,15 @@
if (gfs_is_jdata(ip))
gfs_log_flush_glock(ip->i_gl);
- else
- i_gh.gh_flags |= GL_SYNC;
+ else {
+ if ((!datasync) || (inode->i_state & I_DIRTY_DATASYNC)) {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 0,
+ };
+ error = sync_inode(inode, &wbc);
+ }
+ }
gfs_glock_dq_uninit(&i_gh);
^ permalink raw reply [flat|nested] 15+ messages in thread* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2007-06-17 2:56 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2007-06-17 2:56 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL5
Changes by: wcheng at sourceware.org 2007-06-17 02:56:43
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
bugzilla 244134:
Backport gfs datasync patch from RHEL4 to RHEL5 - sampled performance data:
Throughput (lock_nolock)
--- before ---- --- after ---
Min 238.14 KB/s Min 1.00 MB/s
Max 238.14 KB/s Max 1.00 MB/s
Avg 238.14 KB/s Avg 1.00 MB/s
Latency Latency (lock_nolock)
--- before --------- --- after ---------
Min 3805 usec 3 msec Min 821 usec 0 msec
Max 6043 usec 6 msec Max 5466 usec 5 msec
Avg 4182 usec 4 msec Avg 1050 usec 1 msec
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.28.2.1&r2=1.28.2.2
--- cluster/gfs-kernel/src/gfs/ops_file.c 2006/11/17 19:48:59 1.28.2.1
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2007/06/17 02:56:43 1.28.2.2
@@ -24,6 +24,7 @@
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/aio.h>
+#include <linux/writeback.h>
#include <asm/uaccess.h>
#include "gfs_ioctl.h"
@@ -1514,6 +1515,7 @@
{
struct gfs_inode *ip = get_v2ip(dentry->d_inode);
struct gfs_holder i_gh;
+ struct inode *inode = dentry->d_inode;
int error;
atomic_inc(&ip->i_sbd->sd_ops_file);
@@ -1524,8 +1526,15 @@
if (gfs_is_jdata(ip))
gfs_log_flush_glock(ip->i_gl);
- else
- i_gh.gh_flags |= GL_SYNC;
+ else {
+ if ((!datasync) || (inode->i_state & I_DIRTY_DATASYNC)) {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 0,
+ };
+ error = sync_inode(inode, &wbc);
+ }
+ }
gfs_glock_dq_uninit(&i_gh);
^ permalink raw reply [flat|nested] 15+ messages in thread* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2007-05-24 22:34 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2007-05-24 22:34 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: wcheng at sourceware.org 2007-05-24 22:34:54
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
Apparently we can't remove these two methods from file operations table.
Since gfs_read() has been changed to use do_sync_read(), which requires
the aio methods to be defined in the file operations table.
So vector read/write (implies NFSD) will be partially broken again after
we put these two methods back.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&r1=1.32&r2=1.33
--- cluster/gfs-kernel/src/gfs/ops_file.c 2007/05/04 14:49:35 1.32
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2007/05/24 22:34:54 1.33
@@ -1750,6 +1750,8 @@
.llseek = gfs_llseek,
.read = gfs_read,
.write = gfs_write,
+ .aio_read = gfs_aio_read,
+ .aio_write = gfs_aio_write,
.ioctl = gfs_ioctl,
.mmap = gfs_mmap,
.open = gfs_open,
^ permalink raw reply [flat|nested] 15+ messages in thread
* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2007-05-04 14:49 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2007-05-04 14:49 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: wcheng at sourceware.org 2007-05-04 14:49:36
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
Temporarily disable GFS native AIO support since it currently breaks
vector read-write (used by user mode application system call and NFSD).
Will come back to fix this soon. Right now, application is expected to
use the posix AIO call (done by libc AIO emulation).
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&r1=1.31&r2=1.32
--- cluster/gfs-kernel/src/gfs/ops_file.c 2007/02/02 21:01:04 1.31
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2007/05/04 14:49:35 1.32
@@ -1750,8 +1750,6 @@
.llseek = gfs_llseek,
.read = gfs_read,
.write = gfs_write,
- .aio_read = gfs_aio_read,
- .aio_write = gfs_aio_write,
.ioctl = gfs_ioctl,
.mmap = gfs_mmap,
.open = gfs_open,
^ permalink raw reply [flat|nested] 15+ messages in thread
* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2007-01-17 22:30 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2007-01-17 22:30 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: STABLE
Changes by: wcheng at sourceware.org 2007-01-17 22:30:13
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
Remove aio code that was mistakenly added into stable cvs branch.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&only_with_tag=STABLE&r1=1.16.6.2.2.6&r2=1.16.6.2.2.7
--- cluster/gfs-kernel/src/gfs/ops_file.c 2006/11/17 20:39:24 1.16.6.2.2.6
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2007/01/17 22:30:12 1.16.6.2.2.7
@@ -646,12 +646,6 @@
if (alloc_required) {
set_bit(GFF_DID_DIRECT_ALLOC, &fp->f_flags);
- /* for asynchronous IO, the buffer can not be splitted */
- if (iocb) {
- count = do_write_direct_alloc(file, buf, size, offset, iocb);
- goto out_iocb_write;
- }
-
/* split large writes into smaller atomic transactions */
while (size) {
s = gfs_tune_get(sdp, gt_max_atomic_write);
@@ -681,7 +675,6 @@
gfs_glock_dq_uninit(&t_gh);
}
- out_iocb_write:
error = 0;
out_gunlock:
^ permalink raw reply [flat|nested] 15+ messages in thread* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2006-11-17 20:39 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2006-11-17 20:39 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: STABLE
Changes by: wcheng at sourceware.org 2006-11-17 20:39:24
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
Bugzilla 216209: GFS has been splitting large writes into smaller atomic
transactions. This would generate multiple aio completion calls (one
for each transaction) that falsely notify application about data
completion. Problem is reported by QA team while doing RHEL5 testing.
Issue affects aio io size > 4MB.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&only_with_tag=STABLE&r1=1.16.6.2.2.5&r2=1.16.6.2.2.6
--- cluster/gfs-kernel/src/gfs/ops_file.c 2006/06/29 16:50:41 1.16.6.2.2.5
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2006/11/17 20:39:24 1.16.6.2.2.6
@@ -646,6 +646,12 @@
if (alloc_required) {
set_bit(GFF_DID_DIRECT_ALLOC, &fp->f_flags);
+ /* for asynchronous IO, the buffer can not be splitted */
+ if (iocb) {
+ count = do_write_direct_alloc(file, buf, size, offset, iocb);
+ goto out_iocb_write;
+ }
+
/* split large writes into smaller atomic transactions */
while (size) {
s = gfs_tune_get(sdp, gt_max_atomic_write);
@@ -675,6 +681,7 @@
gfs_glock_dq_uninit(&t_gh);
}
+ out_iocb_write:
error = 0;
out_gunlock:
^ permalink raw reply [flat|nested] 15+ messages in thread* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2006-11-17 20:33 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2006-11-17 20:33 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL4
Changes by: wcheng at sourceware.org 2006-11-17 20:33:16
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
Bugzilla 216209: GFS has been splitting large writes into smaller atomic
transactions. This would generate multiple aio completion calls (one
for each transaction) that falsely notify application about data
completion. Problem is reported by QA team while doing RHEL5 testing.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.16.2.14&r2=1.16.2.15
--- cluster/gfs-kernel/src/gfs/ops_file.c 2006/06/07 14:06:11 1.16.2.14
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2006/11/17 20:33:15 1.16.2.15
@@ -709,6 +709,12 @@
if (alloc_required) {
set_bit(GFF_DID_DIRECT_ALLOC, &fp->f_flags);
+ /* for asynchronous IO, the buffer can not be splitted */
+ if (iocb) {
+ count = do_write_direct_alloc(file, buf, size, offset, iocb);
+ goto out_iocb_write;
+ }
+
/* split large writes into smaller atomic transactions */
while (size) {
s = gfs_tune_get(sdp, gt_max_atomic_write);
@@ -746,6 +752,7 @@
gfs_glock_dq_uninit(&t_gh);
}
+ out_iocb_write:
error = 0;
out_gunlock:
^ permalink raw reply [flat|nested] 15+ messages in thread* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2006-11-17 20:26 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2006-11-17 20:26 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL50
Changes by: wcheng at sourceware.org 2006-11-17 20:26:28
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
Bugzilla 214274: GFS has been splitting large writes into smaller atomic
transactions. This would generate multiple aio completion calls (one
for each transaction) that falsely notify application about data
completion. Problem is reported by QA team as data corruption.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&only_with_tag=RHEL50&r1=1.28&r2=1.28.4.1
--- cluster/gfs-kernel/src/gfs/ops_file.c 2006/10/27 16:22:17 1.28
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2006/11/17 20:26:28 1.28.4.1
@@ -700,6 +700,12 @@
if (alloc_required) {
set_bit(GFF_DID_DIRECT_ALLOC, &fp->f_flags);
+ /* for asynchronous IO, the buffer can not be splitted */
+ if (iocb) {
+ count = do_write_direct_alloc(file, buf, size, offset, iocb);
+ goto out_iocb_write;
+ }
+
/* split large writes into smaller atomic transactions */
while (size) {
s = gfs_tune_get(sdp, gt_max_atomic_write);
@@ -737,6 +743,7 @@
gfs_glock_dq_uninit(&t_gh);
}
+out_iocb_write:
error = 0;
out_gunlock:
^ permalink raw reply [flat|nested] 15+ messages in thread* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2006-11-17 19:49 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2006-11-17 19:49 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL5
Changes by: wcheng at sourceware.org 2006-11-17 19:49:00
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
Bugzilla 214274: GFS has been splitting large writes into smaller atomic
transactions. This would generate multiple aio completion calls (one
for each transaction) that falsely notify application about data
completion. Problem is reported by QA team as data corruption.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.28&r2=1.28.2.1
--- cluster/gfs-kernel/src/gfs/ops_file.c 2006/10/27 16:22:17 1.28
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2006/11/17 19:48:59 1.28.2.1
@@ -700,6 +700,12 @@
if (alloc_required) {
set_bit(GFF_DID_DIRECT_ALLOC, &fp->f_flags);
+ /* for asynchronous IO, the buffer can not be splitted */
+ if (iocb) {
+ count = do_write_direct_alloc(file, buf, size, offset, iocb);
+ goto out_iocb_write;
+ }
+
/* split large writes into smaller atomic transactions */
while (size) {
s = gfs_tune_get(sdp, gt_max_atomic_write);
@@ -737,6 +743,7 @@
gfs_glock_dq_uninit(&t_gh);
}
+out_iocb_write:
error = 0;
out_gunlock:
^ permalink raw reply [flat|nested] 15+ messages in thread* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2006-11-17 16:15 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2006-11-17 16:15 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: wcheng at sourceware.org 2006-11-17 16:15:32
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
Bugzilla 214274: Oops... only directIO has this issue - buffer IO should be
fine. Revert buffer io changes.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&r1=1.29&r2=1.30
--- cluster/gfs-kernel/src/gfs/ops_file.c 2006/11/17 05:00:57 1.29
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2006/11/17 16:15:31 1.30
@@ -700,15 +700,18 @@
if (alloc_required) {
set_bit(GFF_DID_DIRECT_ALLOC, &fp->f_flags);
+ /* for asynchronous IO, the buffer can not be splitted */
+ if (iocb) {
+ count = do_write_direct_alloc(file, buf, size, offset, iocb);
+ goto out_iocb_write;
+ }
+
/* split large writes into smaller atomic transactions */
while (size) {
- if (iocb)
+ s = gfs_tune_get(sdp, gt_max_atomic_write);
+ if (s > size)
s = size;
- else {
- s = gfs_tune_get(sdp, gt_max_atomic_write);
- if (s > size)
- s = size;
- }
+
error = do_write_direct_alloc(file, buf, s, offset, iocb);
if (error < 0)
goto out_gunlock;
@@ -740,6 +743,7 @@
gfs_glock_dq_uninit(&t_gh);
}
+out_iocb_write:
error = 0;
out_gunlock:
@@ -977,13 +981,9 @@
/* split large writes into smaller atomic transactions */
while (size) {
- if (iocb)
+ s = gfs_tune_get(sdp, gt_max_atomic_write);
+ if (s > size)
s = size;
- else {
- s = gfs_tune_get(sdp, gt_max_atomic_write);
- if (s > size)
- s = size;
- }
error = do_do_write_buf(file, buf, s, offset, iocb);
if (error < 0)
^ permalink raw reply [flat|nested] 15+ messages in thread* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2006-11-17 5:00 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2006-11-17 5:00 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: wcheng at sourceware.org 2006-11-17 05:00:57
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
Bugzilla 214274: GFS has been splitting large writes into smaller atomic
transactions. This would generate multiple aio completion calls (one
for each transaction) that falsely notify application about data
completion. Problem is reported by QA team as data corruption.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&r1=1.28&r2=1.29
--- cluster/gfs-kernel/src/gfs/ops_file.c 2006/10/27 16:22:17 1.28
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2006/11/17 05:00:57 1.29
@@ -702,10 +702,13 @@
/* split large writes into smaller atomic transactions */
while (size) {
- s = gfs_tune_get(sdp, gt_max_atomic_write);
- if (s > size)
+ if (iocb)
s = size;
-
+ else {
+ s = gfs_tune_get(sdp, gt_max_atomic_write);
+ if (s > size)
+ s = size;
+ }
error = do_write_direct_alloc(file, buf, s, offset, iocb);
if (error < 0)
goto out_gunlock;
@@ -974,9 +977,13 @@
/* split large writes into smaller atomic transactions */
while (size) {
- s = gfs_tune_get(sdp, gt_max_atomic_write);
- if (s > size)
+ if (iocb)
s = size;
+ else {
+ s = gfs_tune_get(sdp, gt_max_atomic_write);
+ if (s > size)
+ s = size;
+ }
error = do_do_write_buf(file, buf, s, offset, iocb);
if (error < 0)
^ permalink raw reply [flat|nested] 15+ messages in thread* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2006-10-27 16:22 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2006-10-27 16:22 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: wcheng at sourceware.org 2006-10-27 16:22:18
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
Bugzilla 211622 - Root issue is found and fixed. Back out the workaround.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&r1=1.27&r2=1.28
--- cluster/gfs-kernel/src/gfs/ops_file.c 2006/10/24 03:35:11 1.27
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2006/10/27 16:22:17 1.28
@@ -303,7 +303,7 @@
struct inode *inode = file->f_mapping->host;
struct gfs_inode *ip = get_v2ip(inode);
unsigned int state = LM_ST_DEFERRED;
- int flags = LM_FLAG_ANY;
+ int flags = 0;
unsigned int x;
ssize_t count = 0;
int error;
@@ -635,7 +635,7 @@
struct gfs_inode *ip = get_v2ip(file->f_mapping->host);
struct gfs_sbd *sdp = ip->i_sbd;
struct gfs_file *fp = get_v2fp(file);
- unsigned int state = LM_ST_EXCLUSIVE;
+ unsigned int state = LM_ST_DEFERRED;
int alloc_required;
unsigned int x;
size_t s;
^ permalink raw reply [flat|nested] 15+ messages in thread
* [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c
@ 2006-10-24 3:35 wcheng
0 siblings, 0 replies; 15+ messages in thread
From: wcheng @ 2006-10-24 3:35 UTC (permalink / raw)
To: cluster-devel.redhat.com
CVSROOT: /cvs/cluster
Module name: cluster
Changes by: wcheng at sourceware.org 2006-10-24 03:35:11
Modified files:
gfs-kernel/src/gfs: ops_file.c
Log message:
Bugzilla 211622: GFS1 will assert at xmote_bh() if DLM grants SHARED
lock to direct IO's DEFERRED request. Add LM_FLAG_ANY
to direct read to allow relaxed state and change direct
write to use EXCLUSIVE lock.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_file.c.diff?cvsroot=cluster&r1=1.26&r2=1.27
--- cluster/gfs-kernel/src/gfs/ops_file.c 2006/10/17 19:02:35 1.26
+++ cluster/gfs-kernel/src/gfs/ops_file.c 2006/10/24 03:35:11 1.27
@@ -303,7 +303,7 @@
struct inode *inode = file->f_mapping->host;
struct gfs_inode *ip = get_v2ip(inode);
unsigned int state = LM_ST_DEFERRED;
- int flags = 0;
+ int flags = LM_FLAG_ANY;
unsigned int x;
ssize_t count = 0;
int error;
@@ -635,7 +635,7 @@
struct gfs_inode *ip = get_v2ip(file->f_mapping->host);
struct gfs_sbd *sdp = ip->i_sbd;
struct gfs_file *fp = get_v2fp(file);
- unsigned int state = LM_ST_DEFERRED;
+ unsigned int state = LM_ST_EXCLUSIVE;
int alloc_required;
unsigned int x;
size_t s;
^ permalink raw reply [flat|nested] 15+ messages in thread
end of thread, other threads:[~2008-01-30 9:05 UTC | newest]
Thread overview: 15+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-01-30 9:05 [Cluster-devel] cluster/gfs-kernel/src/gfs ops_file.c fabbione
-- strict thread matches above, loose matches on Subject: below --
2008-01-28 6:36 fabbione
2007-06-17 3:35 wcheng
2007-06-17 2:56 wcheng
2007-05-24 22:34 wcheng
2007-05-04 14:49 wcheng
2007-01-17 22:30 wcheng
2006-11-17 20:39 wcheng
2006-11-17 20:33 wcheng
2006-11-17 20:26 wcheng
2006-11-17 19:49 wcheng
2006-11-17 16:15 wcheng
2006-11-17 5:00 wcheng
2006-10-27 16:22 wcheng
2006-10-24 3:35 wcheng
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).