From: Mike Christie <mchristi@redhat.com>
To: martin.petersen@oracle.com, jejb@linux.vnet.ibm.com,
linux-scsi@vger.kernel.org, target-devel@vger.kernel.org,
nab@linux-iscsi.org
Cc: Mike Christie <mchristi@redhat.com>
Subject: [PATCH 17/19] tcmu: make ring buffer timer configurable
Date: Mon, 30 Oct 2017 03:44:37 +0000 [thread overview]
Message-ID: <1509335079-5276-18-git-send-email-mchristi@redhat.com> (raw)
In-Reply-To: <1509335079-5276-1-git-send-email-mchristi@redhat.com>
This adds a timer, qfull_time_out, that controls how long a
device will wait for ring buffer space to open before
failing the commands in the queue. It is useful to separate
this timer from the cmd_time_out and default 30 sec one,
because for HA setups cmd_time_out may be disabled and 30
seconds is too long to wait when some OSs like ESX will
timeout commands after as little as 8 - 15 seconds.
Signed-off-by: Mike Christie <mchristi@redhat.com>
---
drivers/target/target_core_user.c | 131 +++++++++++++++++++++++++++++---------
1 file changed, 101 insertions(+), 30 deletions(-)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 80dd17a..92eb918 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -142,8 +142,12 @@ struct tcmu_dev {
struct idr commands;
- struct timer_list timeout;
+ struct timer_list cmd_timer;
unsigned int cmd_time_out;
+
+ struct timer_list qfull_timer;
+ int qfull_time_out;
+
struct list_head timedout_entry;
spinlock_t nl_cmd_lock;
@@ -740,18 +744,14 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
return command_size;
}
-static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
+static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
+ struct timer_list *timer)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
- unsigned long tmo = udev->cmd_time_out;
int cmd_id;
- /*
- * If it was on the cmdr queue waiting we do not reset the timer
- * for requeues and when it is finally sent to userspace.
- */
if (tcmu_cmd->cmd_id)
- return 0;
+ goto setup_timer;
cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
if (cmd_id < 0) {
@@ -760,23 +760,38 @@ static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
}
tcmu_cmd->cmd_id = cmd_id;
- if (!tmo)
- tmo = TCMU_TIME_OUT;
-
pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
udev->name, tmo / MSEC_PER_SEC);
+setup_timer:
+ if (!tmo)
+ return 0;
+
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
- mod_timer(&udev->timeout, tcmu_cmd->deadline);
+ mod_timer(timer, tcmu_cmd->deadline);
return 0;
}
static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+ unsigned int tmo;
int ret;
- ret = tcmu_setup_cmd_timer(tcmu_cmd);
+ /*
+ * For backwards compat if qfull_time_out is not set use
+ * cmd_time_out and if that's not set use the default time out.
+ */
+ if (!udev->qfull_time_out)
+ return -ETIMEDOUT;
+ else if (udev->qfull_time_out > 0)
+ tmo = udev->qfull_time_out;
+ else if (udev->cmd_time_out)
+ tmo = udev->cmd_time_out;
+ else
+ tmo = TCMU_TIME_OUT;
+
+ ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
if (ret)
return ret;
@@ -900,7 +915,8 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
}
entry->req.iov_bidi_cnt = iov_cnt;
- ret = tcmu_setup_cmd_timer(tcmu_cmd);
+ ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
+ &udev->cmd_timer);
if (ret) {
tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
*scsi_err = TCM_OUT_OF_RESOURCES;
@@ -1046,14 +1062,19 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
handled++;
}
- if (mb->cmd_tail == mb->cmd_head && list_empty(&udev->cmdr_queue)) {
- del_timer(&udev->timeout);
- /*
- * not more pending or waiting commands so try to reclaim
- * blocks if needed.
- */
- if (atomic_read(&global_db_count) > TCMU_GLOBAL_MAX_BLOCKS)
- schedule_delayed_work(&tcmu_unmap_work, 0);
+ if (mb->cmd_tail == mb->cmd_head) {
+ /* no more pending commands */
+ del_timer(&udev->cmd_timer);
+
+ if (list_empty(&udev->cmdr_queue)) {
+ /*
+ * no more pending or waiting commands so try to
+ * reclaim blocks if needed.
+ */
+ if (atomic_read(&global_db_count) >
+ TCMU_GLOBAL_MAX_BLOCKS)
+ schedule_delayed_work(&tcmu_unmap_work, 0);
+ }
}
return handled;
@@ -1074,13 +1095,15 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
return 0;
is_running = list_empty(&cmd->cmdr_queue_entry);
- pr_debug("Timing out cmd %u on dev %s that is %s.\n",
- id, udev->name, is_running ? "inflight" : "queued");
-
- se_cmd = cmd->se_cmd;
- cmd->se_cmd = NULL;
if (is_running) {
+ /*
+ * If cmd_time_out is disabled but qfull is set deadline
+ * will only reflect the qfull timeout. Ignore it.
+ */
+ if (!udev->cmd_time_out)
+ return 0;
+
set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
/*
* target_complete_cmd will translate this to LUN COMM FAILURE
@@ -1093,6 +1116,12 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
tcmu_free_cmd(cmd);
scsi_status = SAM_STAT_TASK_SET_FULL;
}
+
+ pr_debug("Timing out cmd %u on dev %s that is %s.\n",
+ id, udev->name, is_running ? "inflight" : "queued");
+
+ se_cmd = cmd->se_cmd;
+ cmd->se_cmd = NULL;
target_complete_cmd(se_cmd, scsi_status);
return 0;
}
@@ -1101,7 +1130,7 @@ static void tcmu_device_timedout(unsigned long data)
{
struct tcmu_dev *udev = (struct tcmu_dev *)data;
- pr_debug("%s cmd timeout has expired\n", udev->name);
+ pr_debug("%s cmd/qfull timeout has expired\n", udev->name);
spin_lock(&timed_out_udevs_lock);
if (list_empty(&udev->timedout_entry))
@@ -1148,6 +1177,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
udev->hba = hba;
udev->cmd_time_out = TCMU_TIME_OUT;
+ udev->qfull_time_out = -1;
mutex_init(&udev->cmdr_lock);
@@ -1155,7 +1185,10 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&udev->cmdr_queue);
idr_init(&udev->commands);
- setup_timer(&udev->timeout, tcmu_device_timedout,
+ setup_timer(&udev->qfull_timer, tcmu_device_timedout,
+ (unsigned long)udev);
+
+ setup_timer(&udev->cmd_timer, tcmu_device_timedout,
(unsigned long)udev);
init_waitqueue_head(&udev->nl_cmd_wq);
@@ -1211,6 +1244,8 @@ static bool run_cmdr_queue(struct tcmu_dev *udev)
goto done;
}
}
+ if (list_empty(&udev->cmdr_queue))
+ del_timer(&udev->qfull_timer);
done:
return drained;
}
@@ -1710,7 +1745,8 @@ static void tcmu_destroy_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
- del_timer_sync(&udev->timeout);
+ del_timer_sync(&udev->cmd_timer);
+ del_timer_sync(&udev->qfull_timer);
mutex_lock(&root_udev_mutex);
list_del(&udev->node);
@@ -1890,6 +1926,40 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
}
CONFIGFS_ATTR(tcmu_, cmd_time_out);
+static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
+ udev->qfull_time_out :
+ udev->qfull_time_out / MSEC_PER_SEC);
+}
+
+static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ s32 val;
+ int ret;
+
+ ret = kstrtos32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val >= 0) {
+ udev->qfull_time_out = val * MSEC_PER_SEC;
+ } else {
+ printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
+ return -EINVAL;
+ }
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, qfull_time_out);
+
static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
@@ -2035,6 +2105,7 @@ static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
static struct configfs_attribute *tcmu_attrib_attrs[] = {
&tcmu_attr_cmd_time_out,
+ &tcmu_attr_qfull_time_out,
&tcmu_attr_dev_config,
&tcmu_attr_dev_size,
&tcmu_attr_emulate_write_cache,
--
1.8.3.1
next prev parent reply other threads:[~2017-10-30 3:44 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-10-30 3:44 [PATCH 00/19] target/target_core_user: changes for 4.16 Mike Christie
2017-10-30 3:44 ` [PATCH 01/19] tcmu: fix crash when removing the tcmu device v4 Mike Christie
2017-10-30 3:44 ` [PATCH 02/19] tcmu: Add netlink command reply supported option for each device Mike Christie
2017-10-30 3:44 ` [PATCH 03/19] tcmu: Use macro to call container_of in tcmu_cmd_time_out_show Mike Christie
2017-10-30 3:44 ` [PATCH 04/19] tcmu: fix double se_cmd completion Mike Christie
2017-10-30 3:44 ` [PATCH 05/19] tcmu: merge common block release code Mike Christie
2017-10-30 3:44 ` [PATCH 06/19] tcmu: split unmap_thread_fn Mike Christie
2017-10-30 3:44 ` [PATCH 07/19] tcmu: fix unmap thread race Mike Christie
2017-10-30 3:44 ` [PATCH 08/19] tcmu: move expired command completion to unmap thread Mike Christie
2017-10-30 3:44 ` [PATCH 09/19] tcmu: remove commands_lock Mike Christie
2017-10-30 3:44 ` [PATCH 10/19] tcmu: release blocks for partially setup cmds Mike Christie
2017-10-30 3:44 ` [PATCH 11/19] tcmu: simplify scatter_data_area error handling Mike Christie
2017-10-30 3:44 ` [PATCH 12/19] tcmu: fix free block calculation Mike Christie
2017-10-30 3:44 ` [PATCH 13/19] tcmu: clean up the scatter helper Mike Christie
2017-10-30 3:44 ` [PATCH 14/19] tcmu: prep queue_cmd_ring to be used by unmap wq Mike Christie
2017-10-30 3:44 ` [PATCH 15/19] tcmu: simplify dbi thresh handling Mike Christie
2017-10-30 3:44 ` [PATCH 16/19] tcmu: don't block submitting context for block waits Mike Christie
2017-10-30 3:44 ` Mike Christie [this message]
2017-10-30 3:44 ` [PATCH 18/19] tcmu: allow max block and global max blocks to be settable Mike Christie
2017-10-30 3:44 ` [PATCH 19/19] target: return SAM_STAT_TASK_SET_FULL for TCM_OUT_OF_RESOURCES Mike Christie
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1509335079-5276-18-git-send-email-mchristi@redhat.com \
--to=mchristi@redhat.com \
--cc=jejb@linux.vnet.ibm.com \
--cc=linux-scsi@vger.kernel.org \
--cc=martin.petersen@oracle.com \
--cc=nab@linux-iscsi.org \
--cc=target-devel@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).