From: Sage Weil <sage@newdream.net>
To: linux-kernel@vger.kernel.org, linux-fsdevel@vger.kernel.org,
greg@kroah.com
Cc: Sage Weil <sage@newdream.net>
Subject: [PATCH 18/21] ceph: ioctls
Date: Fri, 19 Jun 2009 15:31:39 -0700
Message-ID: <1245450702-31343-19-git-send-email-sage@newdream.net>
In-Reply-To: <1245450702-31343-18-git-send-email-sage@newdream.net>
A few Ceph ioctls for getting and setting file layout (striping)
parameters.
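
For context, a minimal sketch of how a userspace program might call these
ioctls. This is illustrative only: the "ceph/ioctl.h" include path is an
assumption (only CEPH_IOC_GET_LAYOUT, CEPH_IOC_SET_LAYOUT, and
struct ceph_file_layout come from the patch below), and no particular
layout fields are interpreted here.

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "ceph/ioctl.h"	/* assumed install path for the ioctl definitions */

	int main(int argc, char **argv)
	{
		struct ceph_file_layout layout;
		int fd;

		if (argc < 2)
			return 1;
		fd = open(argv[1], O_RDONLY);
		if (fd < 0)
			return 1;

		/* fetch the current striping parameters for this file */
		if (ioctl(fd, CEPH_IOC_GET_LAYOUT, &layout) < 0)
			perror("CEPH_IOC_GET_LAYOUT");

		/* CEPH_IOC_SET_LAYOUT takes the same struct, filled in by the
		 * caller, and asks the MDS to apply the new layout. */

		close(fd);
		return 0;
	}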
Signed-off-by: Sage Weil <sage@newdream.net>
---
fs/staging/ceph/ioctl.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++
fs/staging/ceph/ioctl.h | 12 ++++++++
2 files changed, 77 insertions(+), 0 deletions(-)
create mode 100644 fs/staging/ceph/ioctl.c
create mode 100644 fs/staging/ceph/ioctl.h
diff --git a/fs/staging/ceph/ioctl.c b/fs/staging/ceph/ioctl.c
new file mode 100644
index 0000000..69c65c1
--- /dev/null
+++ b/fs/staging/ceph/ioctl.c
@@ -0,0 +1,65 @@
+#include "ioctl.h"
+#include "super.h"
+#include "ceph_debug.h"
+
+int ceph_debug_ioctl __read_mostly = -1;
+#define DOUT_MASK DOUT_MASK_IOCTL
+#define DOUT_VAR ceph_debug_ioctl
+
+
+/*
+ * ioctls
+ */
+
+static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
+{
+ struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode);
+ int err;
+
+ err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT);
+ if (!err) {
+ if (copy_to_user(arg, &ci->i_layout, sizeof(ci->i_layout)))
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
+ struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_request *req;
+ struct ceph_file_layout layout;
+ int err;
+
+ /* copy and validate */
+ if (copy_from_user(&layout, arg, sizeof(layout)))
+ return -EFAULT;
+
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT,
+ USE_AUTH_MDS);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->r_inode = igrab(inode);
+ req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
+ req->r_args.setlayout.layout = layout;
+
+ err = ceph_mdsc_do_request(mdsc, parent_inode, req);
+ ceph_mdsc_put_request(req);
+ return err;
+}
+
+long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ dout(10, "ioctl file %p cmd %u arg %lu\n", file, cmd, arg);
+ switch (cmd) {
+ case CEPH_IOC_GET_LAYOUT:
+ return ceph_ioctl_get_layout(file, (void __user *)arg);
+
+ case CEPH_IOC_SET_LAYOUT:
+ return ceph_ioctl_set_layout(file, (void __user *)arg);
+ }
+ return -ENOTTY;
+}
diff --git a/fs/staging/ceph/ioctl.h b/fs/staging/ceph/ioctl.h
new file mode 100644
index 0000000..537c27b
--- /dev/null
+++ b/fs/staging/ceph/ioctl.h
@@ -0,0 +1,12 @@
+#ifndef FS_CEPH_IOCTL_H
+#define FS_CEPH_IOCTL_H
+
+#include <linux/ioctl.h>
+#include "types.h"
+
+#define CEPH_IOCTL_MAGIC 0x97
+
+#define CEPH_IOC_GET_LAYOUT _IOR(CEPH_IOCTL_MAGIC, 1, struct ceph_file_layout)
+#define CEPH_IOC_SET_LAYOUT _IOW(CEPH_IOCTL_MAGIC, 2, struct ceph_file_layout)
+
+#endif
--
1.5.6.5