From: Dan Williams <dan.j.williams@intel.com>
To: jgarzik@pobox.com, JBottomley@parallels.com
Cc: Len Brown <len.brown@intel.com>,
linux-scsi@vger.kernel.org, Meelis Roos <mroos@linux.ee>,
Eldad Zack <eldadzack@gmail.com>,
"Rafael J. Wysocki" <rjw@sisk.pl>,
linux-ide@vger.kernel.org,
Arjan van de Ven <arjan@linux.intel.com>,
Eldad Zack <eldad@fogrefinery.com>
Subject: [set4 resend PATCH 2/5] async: make async_synchronize_full() flush all work regardless of domain
Date: Mon, 09 Jul 2012 19:33:30 -0700
Message-ID: <20120710023330.26249.5894.stgit@dwillia2-linux.jf.intel.com>
In-Reply-To: <20120710023241.26249.13718.stgit@dwillia2-linux.jf.intel.com>

In response to an async-related regression James noted:

  "My theory is that this is an init problem: The assumption in a lot of
   our code is that async_synchronize_full() waits for everything ... even
   the domain specific async schedules, which isn't true."

...so make this assumption true.

Each domain, including the default one, registers itself on a global
domain list when work is scheduled for it. Once all of its pending
entries complete, the domain removes itself from that list. Waiting for
that list to become empty therefore synchronizes all in-flight work
across all domains.
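
As a concrete illustration of the mechanism, here is a minimal
hypothetical sketch of a driver scheduling work into its own registered
domain; foo_probe_domain and foo_probe_fn are illustrative names, not
part of this patch:

    #include <linux/async.h>

    /* scheduling into this domain adds it to the global async_domains
     * list until all of its work has drained */
    static ASYNC_DOMAIN(foo_probe_domain);

    static void foo_probe_fn(void *data, async_cookie_t cookie)
    {
            /* slow, probe-like work runs here */
    }

    static int __init foo_init(void)
    {
            async_schedule_domain(foo_probe_fn, NULL, &foo_probe_domain);

            /* after this patch the full sync also waits for
             * foo_probe_domain, not just the default domain */
            async_synchronize_full();
            return 0;
    }
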
Domains can opt out of global syncing by being declared exclusive with
ASYNC_DOMAIN_EXCLUSIVE(). All stack-based domains are declared
exclusive, since such a domain may go out of scope as soon as its last
work item completes.
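
For example (a hypothetical sketch reusing foo_probe_fn from above), an
on-stack domain is declared exclusive and must be flushed by its caller
before the stack frame, and with it the domain, disappears:

    static void foo_run_and_wait(void)
    {
            /* exclusive: opted out of async_synchronize_full() */
            ASYNC_DOMAIN_EXCLUSIVE(local_domain);

            async_schedule_domain(foo_probe_fn, NULL, &local_domain);

            /* only the caller may sync this domain */
            async_synchronize_full_domain(&local_domain);
    }
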
Statically declared domains are mostly fine, but
async_unregister_domain() is provided to close any theoretical races
with pending async_synchronize_full() waiters at module removal time.
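
The scsi hunk below is the first user; a module with its own statically
declared domain would do the equivalent in its exit path (hypothetical
sketch, names as above):

    static void __exit foo_exit(void)
    {
            /* drain the domain, then drop it from the global list so no
             * async_synchronize_full() waiter can still reference it */
            async_synchronize_full_domain(&foo_probe_domain);
            async_unregister_domain(&foo_probe_domain);
    }
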
Cc: Len Brown <len.brown@intel.com>
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Cc: James Bottomley <JBottomley@parallels.com>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Reported-by: Meelis Roos <mroos@linux.ee>
Reported-by: Eldad Zack <eldadzack@gmail.com>
Tested-by: Eldad Zack <eldad@fogrefinery.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/scsi/scsi.c   |    1 +
 include/linux/async.h |    1 +
 kernel/async.c        |   43 +++++++++++++++++++++++++++++++++++++++++--
 3 files changed, 43 insertions(+), 2 deletions(-)

diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 4cade88..2936b44 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1355,6 +1355,7 @@ static void __exit exit_scsi(void)
         scsi_exit_devinfo();
         scsi_exit_procfs();
         scsi_exit_queue();
+        async_unregister_domain(&scsi_sd_probe_domain);
 }
 
 subsys_initcall(init_scsi);

diff --git a/include/linux/async.h b/include/linux/async.h
index 364e7ff..7a24fe9 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -46,6 +46,7 @@ struct async_domain {
 extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data);
 extern async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
                                             struct async_domain *domain);
+void async_unregister_domain(struct async_domain *domain);
 extern void async_synchronize_full(void);
 extern void async_synchronize_full_domain(struct async_domain *domain);
 extern void async_synchronize_cookie(async_cookie_t cookie);
diff --git a/kernel/async.c b/kernel/async.c
index ba5491d..9d31183 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -63,7 +63,9 @@ static async_cookie_t next_cookie = 1;
 
 static LIST_HEAD(async_pending);
 static ASYNC_DOMAIN(async_running);
+static LIST_HEAD(async_domains);
 static DEFINE_SPINLOCK(async_lock);
+static DEFINE_MUTEX(async_register_mutex);
 
 struct async_entry {
         struct list_head        list;
@@ -145,6 +147,8 @@ static void async_run_entry_fn(struct work_struct *work)
         /* 3) remove self from the running queue */
         spin_lock_irqsave(&async_lock, flags);
         list_del(&entry->list);
+        if (running->registered && --running->count == 0)
+                list_del_init(&running->node);
 
         /* 4) free the entry */
         kfree(entry);
@@ -187,6 +191,8 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
         spin_lock_irqsave(&async_lock, flags);
         newcookie = entry->cookie = next_cookie++;
         list_add_tail(&entry->list, &async_pending);
+        if (running->registered && running->count++ == 0)
+                list_add_tail(&running->node, &async_domains);
         atomic_inc(&entry_count);
         spin_unlock_irqrestore(&async_lock, flags);
 
@@ -236,13 +242,43 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
  */
 void async_synchronize_full(void)
 {
+        mutex_lock(&async_register_mutex);
         do {
-                async_synchronize_cookie(next_cookie);
-        } while (!list_empty(&async_running.domain) || !list_empty(&async_pending));
+                struct async_domain *domain = NULL;
+
+                spin_lock_irq(&async_lock);
+                if (!list_empty(&async_domains))
+                        domain = list_first_entry(&async_domains, typeof(*domain), node);
+                spin_unlock_irq(&async_lock);
+
+                async_synchronize_cookie_domain(next_cookie, domain);
+        } while (!list_empty(&async_domains));
+        mutex_unlock(&async_register_mutex);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full);
 
 /**
+ * async_unregister_domain - ensure no more anonymous waiters on this domain
+ * @domain: idle domain to flush out of any async_synchronize_full instances
+ *
+ * async_synchronize_{cookie|full}_domain() are not flushed since callers
+ * of these routines should know the lifetime of @domain
+ *
+ * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
+ */
+void async_unregister_domain(struct async_domain *domain)
+{
+        mutex_lock(&async_register_mutex);
+        spin_lock_irq(&async_lock);
+        WARN_ON(!domain->registered || !list_empty(&domain->node) ||
+                !list_empty(&domain->domain));
+        domain->registered = 0;
+        spin_unlock_irq(&async_lock);
+        mutex_unlock(&async_register_mutex);
+}
+EXPORT_SYMBOL_GPL(async_unregister_domain);
+
+/**
  * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
  * @domain: running list to synchronize on
  *
@@ -268,6 +304,9 @@ void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain
 {
         ktime_t uninitialized_var(starttime), delta, endtime;
 
+        if (!running)
+                return;
+
         if (initcall_debug && system_state == SYSTEM_BOOTING) {
                 printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
                 starttime = ktime_get();