public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Andrea Righi <righiandr@users.sourceforge.net>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Jens Axboe <jens.axboe@oracle.com>
Subject: [RFC][PATCH] per-task I/O throttling
Date: Thu, 10 Jan 2008 23:45:19 +0100 (MET)	[thread overview]
Message-ID: <47869FFE.1050000@users.sourceforge.net> (raw)

Allow limiting the bandwidth of I/O-intensive processes, such as backup
tools running in the background, large file copies, checksums on huge
files, etc.

These kinds of processes can noticeably impact system responsiveness
for some time, and playing with task priorities is not always an
acceptable solution.

This patch makes it possible to specify a maximum I/O rate in sectors
per second for each process via /proc/<PID>/io_throttle (the default is
zero, which means no limit).

Signed-off-by: Andrea Righi <a.righi@cineca.it>
---

diff -urpN linux-2.6.24-rc7/block/ll_rw_blk.c linux-2.6.24-rc7-task-io-throttle/block/ll_rw_blk.c
--- linux-2.6.24-rc7/block/ll_rw_blk.c	2008-01-06 22:45:38.000000000 +0100
+++ linux-2.6.24-rc7-task-io-throttle/block/ll_rw_blk.c	2008-01-10 23:23:41.000000000 +0100
@@ -31,6 +31,7 @@
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
 #include <linux/scatterlist.h>
+#include <linux/jiffies.h>
 
 /*
  * for max sense size
@@ -3184,6 +3185,41 @@ static inline int bio_check_eod(struct b
 	return 0;
 }
 
+#ifdef CONFIG_TASK_IO_THROTTLE
+static inline void task_io_throttle(int nr_sectors)	/* charge nr_sectors to current and sleep if its rate limit is exceeded */
+{
+	unsigned long delta;	/* elapsed time since the window started; jiffies_to_msecs() * 1000, i.e. microseconds */
+	long sleep;
+
+	if (!current->io_throttle) {	/* 0 == no limit configured for this task */
+		return;
+	}
+
+	if (!current->io_throttle_timestamp) {	/* first throttled I/O: open the accounting window */
+		current->io_throttle_timestamp = jiffies;
+	}
+	delta = jiffies_to_msecs((long)jiffies -
+			(long)(current->io_throttle_timestamp)) * 1000;	/* NOTE(review): signed-cast subtraction is not wrap-safe; prefer time_after()/jiffies_to_usecs() */
+
+	current->io_throttle_req += nr_sectors;	/* sectors issued so far in this window */
+
+	sleep = current->io_throttle_req -
+		current->io_throttle * max(delta, (unsigned long)1);	/* NOTE(review): units look inconsistent — io_throttle is sectors/sec but delta is in usecs; confirm intended scale */
+	if (sleep > 0) {
+		schedule_timeout_uninterruptible(sleep);	/* NOTE(review): argument is excess sectors, but this API expects jiffies — conversion appears missing */
+	}
+
+	if (delta) {	/* a measurable interval elapsed: reset the accounting window */
+		current->io_throttle_timestamp = jiffies;
+		current->io_throttle_req = 0;
+	}
+}
+#else
+static inline void task_io_throttle(int nr_sectors)	/* no-op when per-task throttling is configured out */
+{
+}
+#endif /* CONFIG_TASK_IO_THROTTLE */
+
 /**
  * generic_make_request: hand a buffer to its device driver for I/O
  * @bio:  The bio describing the location in memory and on the device.
@@ -3221,6 +3257,8 @@ static inline void __generic_make_reques
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
 
+	task_io_throttle(nr_sectors);
+
 	/*
 	 * Resolve the mapping until finished. (drivers are
 	 * still free to implement/resolve their own stacking
diff -urpN linux-2.6.24-rc7/fs/proc/base.c linux-2.6.24-rc7-task-io-throttle/fs/proc/base.c
--- linux-2.6.24-rc7/fs/proc/base.c	2008-01-06 22:45:38.000000000 +0100
+++ linux-2.6.24-rc7-task-io-throttle/fs/proc/base.c	2008-01-10 23:24:43.000000000 +0100
@@ -864,6 +864,56 @@ static const struct file_operations proc
 	.write		= oom_adjust_write,
 };
 
+#ifdef CONFIG_TASK_IO_THROTTLE
+static ssize_t io_throttle_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)	/* /proc/<PID>/io_throttle read: report the limit as decimal text */
+{
+	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+	char buffer[PROC_NUMBUF];
+	size_t len;
+	unsigned long io_throttle;
+
+	if (!task)
+		return -ESRCH;	/* task exited while the /proc file was held open */
+	io_throttle = task->io_throttle;	/* snapshot the value before dropping the task reference */
+	put_task_struct(task);
+
+	len = snprintf(buffer, sizeof(buffer), "%lu\n", io_throttle);
+
+	return simple_read_from_buffer(buf, count, ppos, buffer, len);
+}
+
+static ssize_t io_throttle_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)	/* /proc/<PID>/io_throttle write: parse a decimal limit and store it */
+{
+	struct task_struct *task;
+	char buffer[PROC_NUMBUF], *end;
+	unsigned long io_throttle;
+
+	memset(buffer, 0, sizeof(buffer));	/* guarantee NUL termination for simple_strtoul */
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count))
+		return -EFAULT;
+	io_throttle = simple_strtoul(buffer, &end, 0);
+	if (*end == '\n')	/* tolerate the trailing newline from "echo" */
+		end++;
+	task = get_proc_task(file->f_path.dentry->d_inode);
+	if (!task)
+		return -ESRCH;
+	task->io_throttle = io_throttle;	/* NOTE(review): stored before the input is validated — non-numeric input clobbers the limit with 0, then -EIO below */
+	put_task_struct(task);
+	if (end - buffer == 0)	/* no digits consumed: reject (after the fact, see note above) */
+		return -EIO;
+	return end - buffer;
+}
+
+static const struct file_operations proc_io_throttle_operations = {	/* file ops backing /proc/<PID>/io_throttle */
+	.read		= io_throttle_read,
+	.write		= io_throttle_write,
+};
+#endif /* CONFIG_TASK_IO_THROTTLE */
+
 #ifdef CONFIG_MMU
 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 				size_t count, loff_t *ppos)
@@ -2250,6 +2300,9 @@ static const struct pid_entry tgid_base_
 #ifdef CONFIG_TASK_IO_ACCOUNTING
 	INF("io",	S_IRUGO, pid_io_accounting),
 #endif
+#ifdef CONFIG_TASK_IO_THROTTLE
+	REG("io_throttle",    S_IRUGO|S_IWUSR, io_throttle),
+#endif
 };
 
 static int proc_tgid_base_readdir(struct file * filp,
diff -urpN linux-2.6.24-rc7/include/linux/sched.h linux-2.6.24-rc7-task-io-throttle/include/linux/sched.h
--- linux-2.6.24-rc7/include/linux/sched.h	2008-01-06 22:45:38.000000000 +0100
+++ linux-2.6.24-rc7-task-io-throttle/include/linux/sched.h	2008-01-10 23:23:41.000000000 +0100
@@ -1167,6 +1167,15 @@ struct task_struct {
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
 
+#ifdef CONFIG_TASK_IO_THROTTLE
+	/*
+	 * per-process I/O throttle
+	 */
+	unsigned long io_throttle;
+	unsigned long io_throttle_req;
+	unsigned long io_throttle_timestamp;
+#endif
+
 	/*
 	 * cache last used pipe for splice
 	 */
diff -urpN linux-2.6.24-rc7/init/Kconfig linux-2.6.24-rc7-task-io-throttle/init/Kconfig
--- linux-2.6.24-rc7/init/Kconfig	2008-01-06 22:45:38.000000000 +0100
+++ linux-2.6.24-rc7-task-io-throttle/init/Kconfig	2008-01-10 23:23:41.000000000 +0100
@@ -206,6 +206,14 @@ config TASK_IO_ACCOUNTING
 
 	  Say N if unsure.
 
+config TASK_IO_THROTTLE
+	bool "Enable per-task I/O throttling (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  Allow to limit the maximum I/O rate for specific process(es).
+
+	  Say N if unsure.
+
 config USER_NS
 	bool "User Namespaces (EXPERIMENTAL)"
 	default n

             reply	other threads:[~2008-01-11  0:36 UTC|newest]

Thread overview: 24+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2008-01-10 22:45 Andrea Righi [this message]
2008-01-11  1:50 ` [RFC][PATCH] per-task I/O throttling Bill Davidsen
2008-01-11 10:28   ` Andrea Righi
2008-01-11 14:20     ` Peter Zijlstra
2008-01-11 15:29       ` Andrea Righi
2008-01-11 14:05 ` David Newall
2008-01-11 15:44   ` Andrea Righi
2008-01-16 19:21     ` David Newall
2008-01-11 15:59 ` Balbir Singh
2008-01-11 16:32   ` Andrea Righi
2008-01-12  4:57     ` Valdis.Kletnieks
2008-01-12  9:46       ` Peter Zijlstra
2008-01-12 10:57         ` Balbir Singh
2008-01-12 11:10           ` Peter Zijlstra
2008-01-12 18:01             ` Andrea Righi
2008-01-13  4:46               ` Balbir Singh
2008-01-15 16:49                 ` [RFC][PATCH] per-uid/gid I/O throttling (was Re: [RFC][PATCH] per-task I/O throttling) Andrea Righi
2008-01-11 17:58                   ` Pavel Machek
2008-01-23 15:41                     ` Andrea Righi
2008-01-16 10:45                   ` Balbir Singh
2008-01-16 11:30                     ` Valdis.Kletnieks
2008-01-16 12:05                       ` Balbir Singh
2008-01-16 12:24                         ` Valdis.Kletnieks
2008-01-16 12:58                     ` Andrea Righi

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=47869FFE.1050000@users.sourceforge.net \
    --to=righiandr@users.sourceforge.net \
    --cc=jens.axboe@oracle.com \
    --cc=linux-kernel@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox