* [patch] 1/5 scsi-locking-2.5 single_lun store scsi_device pointer
@ 2003-04-09 23:46 Patrick Mansfield
2003-04-09 23:47 ` [patch] 2/5 scsi-locking-2.5 remove lock hierarchy Patrick Mansfield
0 siblings, 1 reply; 5+ messages in thread
From: Patrick Mansfield @ 2003-04-09 23:46 UTC (permalink / raw)
To: James Bottomley, linux-scsi
Change single_lun code to use a struct scsi_device *, so that we do not
need an sdev (or queue_lock) while checking if a single_lun target is in
use by a particular scsi_device.
diff -purN -X /home/patman/dontdiff sl-base-2.5/drivers/scsi/scsi.h 01_slun_mod/drivers/scsi/scsi.h
--- sl-base-2.5/drivers/scsi/scsi.h Wed Apr 9 11:34:36 2003
+++ 01_slun_mod/drivers/scsi/scsi.h Wed Apr 9 12:39:15 2003
@@ -532,10 +532,11 @@ extern int scsi_dev_info_list_add_str(ch
/*
* scsi_target: representation of a scsi target, for now, this is only
- * used for single_lun devices.
+ * used for single_lun devices. If no one has active IO to the target,
+ * starget_sdev_user is NULL, else it points to the active sdev.
*/
struct scsi_target {
- unsigned int starget_busy;
+ struct scsi_device *starget_sdev_user;
unsigned int starget_refcnt;
};
diff -purN -X /home/patman/dontdiff sl-base-2.5/drivers/scsi/scsi_lib.c 01_slun_mod/drivers/scsi/scsi_lib.c
--- sl-base-2.5/drivers/scsi/scsi_lib.c Wed Apr 9 11:34:37 2003
+++ 01_slun_mod/drivers/scsi/scsi_lib.c Wed Apr 9 12:39:15 2003
@@ -327,9 +327,9 @@ void scsi_setup_cmd_retry(struct scsi_cm
}
/*
- * Called for single_lun devices on IO completion. Clear starget_busy, and
- * Call __blk_run_queue for all the scsi_devices on the target - including
- * current_sdev first.
+ * Called for single_lun devices on IO completion. Clear starget_sdev_user,
+ * and call __blk_run_queue for all the scsi_devices on the target -
+ * including current_sdev first.
*
* Called with *no* scsi locks held.
*/
@@ -338,19 +338,33 @@ static void scsi_single_lun_run(struct s
struct scsi_device *sdev;
unsigned int flags, flags2;
- spin_lock_irqsave(current_sdev->request_queue->queue_lock, flags2);
spin_lock_irqsave(current_sdev->host->host_lock, flags);
- WARN_ON(!current_sdev->sdev_target->starget_busy);
- if (current_sdev->device_busy == 0)
- current_sdev->sdev_target->starget_busy = 0;
+ WARN_ON(!current_sdev->sdev_target->starget_sdev_user);
+ current_sdev->sdev_target->starget_sdev_user = NULL;
spin_unlock_irqrestore(current_sdev->host->host_lock, flags);
/*
* Call __blk_run_queue for all LUNs on the target, starting with
- * current_sdev.
+ * current_sdev. We race with others (to set starget_sdev_user),
+ * but in most cases, we will be first. Ideally, each LU on the
+ * target would get some limited time or requests on the target.
*/
+ spin_lock_irqsave(current_sdev->request_queue->queue_lock, flags2);
__blk_run_queue(current_sdev->request_queue);
spin_unlock_irqrestore(current_sdev->request_queue->queue_lock, flags2);
+
+ spin_lock_irqsave(current_sdev->host->host_lock, flags);
+ if (current_sdev->sdev_target->starget_sdev_user) {
+ /*
+ * After unlock, this races with anyone clearing
+ * starget_sdev_user, but we (should) always enter this
+ * function again, avoiding any problems.
+ */
+ spin_unlock_irqrestore(current_sdev->host->host_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(current_sdev->host->host_lock, flags);
+
list_for_each_entry(sdev, &current_sdev->same_target_siblings,
same_target_siblings) {
spin_lock_irqsave(sdev->request_queue->queue_lock, flags2);
@@ -1158,8 +1172,8 @@ static void scsi_request_fn(request_queu
if (!scsi_host_queue_ready(q, shost, sdev))
goto after_host_lock;
- if (sdev->single_lun && !sdev->device_busy &&
- sdev->sdev_target->starget_busy)
+ if (sdev->single_lun && sdev->sdev_target->starget_sdev_user &&
+ (sdev->sdev_target->starget_sdev_user != sdev))
goto after_host_lock;
/*
@@ -1198,7 +1212,7 @@ static void scsi_request_fn(request_queu
blkdev_dequeue_request(req);
if (sdev->single_lun)
- sdev->sdev_target->starget_busy = 1;
+ sdev->sdev_target->starget_sdev_user = sdev;
shost->host_busy++;
spin_unlock_irqrestore(shost->host_lock, flags);
diff -purN -X /home/patman/dontdiff sl-base-2.5/drivers/scsi/scsi_scan.c 01_slun_mod/drivers/scsi/scsi_scan.c
--- sl-base-2.5/drivers/scsi/scsi_scan.c Wed Apr 9 11:34:37 2003
+++ 01_slun_mod/drivers/scsi/scsi_scan.c Wed Apr 9 12:39:15 2003
@@ -1283,7 +1283,7 @@ static int scsi_add_lun(Scsi_Device *sde
return SCSI_SCAN_NO_RESPONSE;
}
starget->starget_refcnt = 0;
- starget->starget_busy = 0;
+ starget->starget_sdev_user = NULL;
}
starget->starget_refcnt++;
sdev->sdev_target = starget;
^ permalink raw reply [flat|nested] 5+ messages in thread
* [patch] 2/5 scsi-locking-2.5 remove lock hierarchy
2003-04-09 23:46 [patch] 1/5 scsi-locking-2.5 single_lun store scsi_device pointer Patrick Mansfield
@ 2003-04-09 23:47 ` Patrick Mansfield
2003-04-09 23:47 ` [patch] 3/5 scsi-locking-2.5 prevent looping when processing starved queues Patrick Mansfield
0 siblings, 1 reply; 5+ messages in thread
From: Patrick Mansfield @ 2003-04-09 23:47 UTC (permalink / raw)
To: James Bottomley, linux-scsi
Get rid of the lock hierarchy for queue_lock and host_lock (even for the
single_lun case).
diff -purN -X /home/patman/dontdiff 01_slun_mod/drivers/scsi/scsi_lib.c 02_no_hier/drivers/scsi/scsi_lib.c
--- 01_slun_mod/drivers/scsi/scsi_lib.c Wed Apr 9 12:39:15 2003
+++ 02_no_hier/drivers/scsi/scsi_lib.c Wed Apr 9 12:39:20 2003
@@ -1088,9 +1088,10 @@ static inline int scsi_dev_queue_ready(s
/*
* scsi_host_queue_ready: if we can send requests to shost, return 1 else
- * return 0.
+ * return 0. We must end up running the queue again whenever 0 is
+ * returned, else IO can hang.
*
- * Called with queue_lock and host_lock held.
+ * Called with host_lock held.
*/
static inline int scsi_host_queue_ready(struct request_queue *q,
struct Scsi_Host *shost,
@@ -1157,41 +1158,54 @@ static void scsi_request_fn(request_queu
if (blk_queue_plugged(q))
goto completed;
+ if (blk_queue_empty(q))
+ goto completed;
+
/*
* get next queueable request. We do this early to make sure
* that the request is fully prepared even if we cannot
- * accept it. If there is no request, we'll detect this
- * lower down.
+ * accept it.
*/
req = elv_next_request(q);
- if (!scsi_dev_queue_ready(q, sdev))
+ if (!req) {
+ /*
+ * If the device is busy, a returning I/O will
+ * restart the queue. Otherwise, we have to plug
+ * the queue
+ */
+ if (sdev->device_busy == 0)
+ blk_plug_device(q);
goto completed;
+ }
- spin_lock_irqsave(shost->host_lock, flags);
- if (!scsi_host_queue_ready(q, shost, sdev))
- goto after_host_lock;
-
- if (sdev->single_lun && sdev->sdev_target->starget_sdev_user &&
- (sdev->sdev_target->starget_sdev_user != sdev))
- goto after_host_lock;
+ if (!scsi_dev_queue_ready(q, sdev))
+ goto completed;
/*
- * If we couldn't find a request that could be queued, then we
- * can also quit.
+ * Remove the request from the request list.
*/
- if (blk_queue_empty(q))
- goto after_host_lock;
+ if (!(blk_queue_tagged(q) && (blk_queue_start_tag(q, req) == 0)))
+ blkdev_dequeue_request(req);
- if (!req) {
- /* If the device is busy, a returning I/O
- * will restart the queue. Otherwise, we have
- * to plug the queue */
- if (sdev->device_busy == 1)
- blk_plug_device(q);
- goto after_host_lock;
+ sdev->device_busy++;
+ spin_unlock_irq(q->queue_lock);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (!scsi_host_queue_ready(q, shost, sdev))
+ goto host_lock_held;
+
+ if (sdev->single_lun) {
+ if (sdev->sdev_target->starget_sdev_user &&
+ (sdev->sdev_target->starget_sdev_user != sdev))
+ goto host_lock_held;
+ else
+ sdev->sdev_target->starget_sdev_user = sdev;
}
+ shost->host_busy++;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
cmd = req->special;
/*
@@ -1201,26 +1215,6 @@ static void scsi_request_fn(request_queu
BUG_ON(!cmd);
/*
- * Finally, before we release the lock, we copy the
- * request to the command block, and remove the
- * request from the request list. Note that we always
- * operate on the queue head - there is absolutely no
- * reason to search the list, because all of the
- * commands in this queue are for the same device.
- */
- if (!(blk_queue_tagged(q) && (blk_queue_start_tag(q, req) == 0)))
- blkdev_dequeue_request(req);
-
- if (sdev->single_lun)
- sdev->sdev_target->starget_sdev_user = sdev;
-
- shost->host_busy++;
- spin_unlock_irqrestore(shost->host_lock, flags);
-
- sdev->device_busy++;
- spin_unlock_irq(q->queue_lock);
-
- /*
* Finally, initialize any error handling parameters, and set up
* the timers for timeouts.
*/
@@ -1240,8 +1234,21 @@ static void scsi_request_fn(request_queu
completed:
return;
-after_host_lock:
+host_lock_held:
spin_unlock_irqrestore(shost->host_lock, flags);
+ /*
+ * lock q, handle tag, requeue req, and decrement device_busy. We
+ * must return with queue_lock held.
+ *
+ * Decrementing device_busy without checking it is OK, as all such
+ * cases (host limits or settings) should run the queue at some
+ * later time.
+ */
+ spin_lock_irq(q->queue_lock);
+ if (blk_rq_tagged(req))
+ blk_queue_end_tag(q, req);
+ __elv_add_request(q, req, 0, 0);
+ sdev->device_busy--;
}
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
^ permalink raw reply [flat|nested] 5+ messages in thread
* [patch] 3/5 scsi-locking-2.5 prevent looping when processing starved queues
2003-04-09 23:47 ` [patch] 2/5 scsi-locking-2.5 remove lock hierarchy Patrick Mansfield
@ 2003-04-09 23:47 ` Patrick Mansfield
2003-04-09 23:47 ` [patch] 4/5 scsi-locking-2.5 list_del starved_entry plus use GFP_ATOMIC Patrick Mansfield
0 siblings, 1 reply; 5+ messages in thread
From: Patrick Mansfield @ 2003-04-09 23:47 UTC (permalink / raw)
To: James Bottomley, linux-scsi
Ensure that we cannot loop forever (however unlikely) when processing the
starved queues.
diff -purN -X /home/patman/dontdiff 02_no_hier/drivers/scsi/scsi_lib.c 03_no_loop/drivers/scsi/scsi_lib.c
--- 02_no_hier/drivers/scsi/scsi_lib.c Wed Apr 9 12:39:20 2003
+++ 03_no_loop/drivers/scsi/scsi_lib.c Wed Apr 9 12:39:22 2003
@@ -470,6 +470,13 @@ void scsi_queue_next_request(request_que
spin_unlock_irqrestore(sdev2->request_queue->queue_lock, flags);
spin_lock_irqsave(shost->host_lock, flags);
+ if (unlikely(!list_empty(&sdev->starved_entry)))
+ /*
+ * sdev lost a race, and was put back on the
+ * starved list. This is unlikely but without this
+ * in theory we could loop forever.
+ */
+ break;
}
spin_unlock_irqrestore(shost->host_lock, flags);
^ permalink raw reply [flat|nested] 5+ messages in thread
* [patch] 4/5 scsi-locking-2.5 list_del starved_entry plus use GFP_ATOMIC
2003-04-09 23:47 ` [patch] 3/5 scsi-locking-2.5 prevent looping when processing starved queues Patrick Mansfield
@ 2003-04-09 23:47 ` Patrick Mansfield
2003-04-09 23:48 ` [patch] 5/5 scsi-locking-2.5 remove extra sdev2, remove extra logging Patrick Mansfield
0 siblings, 1 reply; 5+ messages in thread
From: Patrick Mansfield @ 2003-04-09 23:47 UTC (permalink / raw)
To: James Bottomley, linux-scsi
list_del the starved_entry when sdev goes away.
Use GFP_ATOMIC when allocating starget, since we hold a lock.
diff -purN -X /home/patman/dontdiff 03_no_loop/drivers/scsi/scsi_scan.c 04_scan_changes/drivers/scsi/scsi_scan.c
--- 03_no_loop/drivers/scsi/scsi_scan.c Wed Apr 9 12:39:15 2003
+++ 04_scan_changes/drivers/scsi/scsi_scan.c Wed Apr 9 13:37:02 2003
@@ -489,13 +489,14 @@ static void scsi_free_sdev(struct scsi_d
sdev->host->hostt->slave_destroy(sdev);
if (sdev->inquiry)
kfree(sdev->inquiry);
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ list_del(&sdev->starved_entry);
if (sdev->single_lun) {
- spin_lock_irqsave(sdev->host->host_lock, flags);
sdev->sdev_target->starget_refcnt--;
if (sdev->sdev_target->starget_refcnt == 0)
kfree(sdev->sdev_target);
- spin_unlock_irqrestore(sdev->host->host_lock, flags);
}
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
kfree(sdev);
}
@@ -1275,7 +1276,7 @@ static int scsi_add_lun(Scsi_Device *sde
}
}
if (!starget) {
- starget = kmalloc(sizeof(*starget), GFP_KERNEL);
+ starget = kmalloc(sizeof(*starget), GFP_ATOMIC);
if (!starget) {
printk(ALLOC_FAILURE_MSG, __FUNCTION__);
spin_unlock_irqrestore(sdev->host->host_lock,
^ permalink raw reply [flat|nested] 5+ messages in thread
* [patch] 5/5 scsi-locking-2.5 remove extra sdev2, remove extra logging
2003-04-09 23:47 ` [patch] 4/5 scsi-locking-2.5 list_del starved_entry plus use GFP_ATOMIC Patrick Mansfield
@ 2003-04-09 23:48 ` Patrick Mansfield
0 siblings, 0 replies; 5+ messages in thread
From: Patrick Mansfield @ 2003-04-09 23:48 UTC (permalink / raw)
To: James Bottomley, linux-scsi
Remove unneeded sdev2 variable.
Remove extra log message.
diff -purN -X /home/patman/dontdiff 04_scan_changes/drivers/scsi/scsi_lib.c 05_cleanup/drivers/scsi/scsi_lib.c
--- 04_scan_changes/drivers/scsi/scsi_lib.c Wed Apr 9 12:39:22 2003
+++ 05_cleanup/drivers/scsi/scsi_lib.c Wed Apr 9 12:39:32 2003
@@ -411,7 +411,7 @@ static void scsi_single_lun_run(struct s
*/
void scsi_queue_next_request(request_queue_t *q, struct scsi_cmnd *cmd)
{
- struct scsi_device *sdev, *sdev2;
+ struct scsi_device *sdev;
struct Scsi_Host *shost;
unsigned long flags;
@@ -460,14 +460,14 @@ void scsi_queue_next_request(request_que
* scsi_request_fn must get the host_lock before checking
* or modifying starved_list or starved_entry.
*/
- sdev2 = list_entry(shost->starved_list.next,
+ sdev = list_entry(shost->starved_list.next,
struct scsi_device, starved_entry);
- list_del_init(&sdev2->starved_entry);
+ list_del_init(&sdev->starved_entry);
spin_unlock_irqrestore(shost->host_lock, flags);
- spin_lock_irqsave(sdev2->request_queue->queue_lock, flags);
- __blk_run_queue(sdev2->request_queue);
- spin_unlock_irqrestore(sdev2->request_queue->queue_lock, flags);
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ __blk_run_queue(sdev->request_queue);
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
spin_lock_irqsave(shost->host_lock, flags);
if (unlikely(!list_empty(&sdev->starved_entry)))
@@ -1123,15 +1123,7 @@ static inline int scsi_host_queue_ready(
return 0;
if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
shost->host_blocked || shost->host_self_blocked) {
- SCSI_LOG_MLQUEUE(3,
- printk("add starved dev <%d,%d,%d,%d>; host "
- "limit %d, busy %d, blocked %d selfblocked %d\n",
- sdev->host->host_no, sdev->channel,
- sdev->id, sdev->lun,
- shost->can_queue, shost->host_busy,
- shost->host_blocked, shost->host_self_blocked));
- list_add_tail(&sdev->starved_entry,
- &shost->starved_list);
+ list_add_tail(&sdev->starved_entry, &shost->starved_list);
return 0;
}
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2003-04-09 23:40 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2003-04-09 23:46 [patch] 1/5 scsi-locking-2.5 single_lun store scsi_device pointer Patrick Mansfield
2003-04-09 23:47 ` [patch] 2/5 scsi-locking-2.5 remove lock hierarchy Patrick Mansfield
2003-04-09 23:47 ` [patch] 3/5 scsi-locking-2.5 prevent looping when processing starved queues Patrick Mansfield
2003-04-09 23:47 ` [patch] 4/5 scsi-locking-2.5 list_del starved_entry plus use GFP_ATOMIC Patrick Mansfield
2003-04-09 23:48 ` [patch] 5/5 scsi-locking-2.5 remove extra sdev2, remove extra logging Patrick Mansfield
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox