public inbox for linux-scsi@vger.kernel.org
 help / color / mirror / Atom feed
From: Douglas Gilbert <dgilbert@interlog.com>
To: linux-scsi@vger.kernel.org
Cc: martin.petersen@oracle.com, jejb@linux.vnet.ibm.com,
	hare@suse.de, bvanassche@acm.org
Subject: [PATCH v24 11/46] sg: change rwlock to spinlock
Date: Sun, 10 Apr 2022 22:28:01 -0400	[thread overview]
Message-ID: <20220411022836.11871-12-dgilbert@interlog.com> (raw)
In-Reply-To: <20220411022836.11871-1-dgilbert@interlog.com>

A reviewer suggested that the extra overhead associated with a
rw lock compared to a spinlock was not worth it for short,
oft-used critical sections.

So the rwlock on the request list/array is changed to a spinlock.
The head of that list is in the owning sf file descriptor object.

Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Douglas Gilbert <dgilbert@interlog.com>
---
 drivers/scsi/sg.c | 58 +++++++++++++++++++++++------------------------
 1 file changed, 29 insertions(+), 29 deletions(-)

diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index d83b75b60aab..71ab1c8c56b4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -144,7 +144,7 @@ struct sg_fd {		/* holds the state of a file descriptor */
 	struct list_head sfd_entry;	/* member sg_device::sfds list */
 	struct sg_device *parentdp;	/* owning device */
 	wait_queue_head_t read_wait;	/* queue read until command done */
-	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
+	spinlock_t rq_list_lock;	/* protect access to list in req_arr */
 	struct mutex f_mutex;	/* protect against changes in this fd */
 	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT      */
 	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
@@ -742,17 +742,17 @@ sg_get_rq_mark(struct sg_fd *sfp, int pack_id)
 	struct sg_request *resp;
 	unsigned long iflags;
 
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	list_for_each_entry(resp, &sfp->rq_list, entry) {
 		/* look for requests that are ready + not SG_IO owned */
 		if (resp->done == 1 && !resp->sg_io_owned &&
 		    (-1 == pack_id || resp->header.pack_id == pack_id)) {
 			resp->done = 2;	/* guard against other readers */
-			write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+			spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 			return resp;
 		}
 	}
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 	return NULL;
 }
 
@@ -807,9 +807,9 @@ srp_done(struct sg_fd *sfp, struct sg_request *srp)
 	unsigned long flags;
 	int ret;
 
-	read_lock_irqsave(&sfp->rq_list_lock, flags);
+	spin_lock_irqsave(&sfp->rq_list_lock, flags);
 	ret = srp->done;
-	read_unlock_irqrestore(&sfp->rq_list_lock, flags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, flags);
 	return ret;
 }
 
@@ -1075,15 +1075,15 @@ sg_ioctl_common(struct file *filp, struct sg_device *sdp, struct sg_fd *sfp,
 			(srp_done(sfp, srp) || SG_IS_DETACHING(sdp)));
 		if (SG_IS_DETACHING(sdp))
 			return -ENODEV;
-		write_lock_irq(&sfp->rq_list_lock);
+		spin_lock_irq(&sfp->rq_list_lock);
 		if (srp->done) {
 			srp->done = 2;
-			write_unlock_irq(&sfp->rq_list_lock);
+			spin_unlock_irq(&sfp->rq_list_lock);
 			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
 			return (result < 0) ? result : 0;
 		}
 		srp->orphan = 1;
-		write_unlock_irq(&sfp->rq_list_lock);
+		spin_unlock_irq(&sfp->rq_list_lock);
 		return result;	/* -ERESTARTSYS because signal hit process */
 	case SG_SET_TIMEOUT:
 		result = get_user(val, ip);
@@ -1135,15 +1135,15 @@ sg_ioctl_common(struct file *filp, struct sg_device *sdp, struct sg_fd *sfp,
 		sfp->force_packid = val ? 1 : 0;
 		return 0;
 	case SG_GET_PACK_ID:
-		read_lock_irqsave(&sfp->rq_list_lock, iflags);
+		spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 		list_for_each_entry(srp, &sfp->rq_list, entry) {
 			if ((1 == srp->done) && (!srp->sg_io_owned)) {
-				read_unlock_irqrestore(&sfp->rq_list_lock,
+				spin_unlock_irqrestore(&sfp->rq_list_lock,
 						       iflags);
 				return put_user(srp->header.pack_id, ip);
 			}
 		}
-		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+		spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 		return put_user(-1, ip);
 	case SG_GET_NUM_WAITING:
 		return put_user(atomic_read(&sfp->waiting), ip);
@@ -1212,9 +1212,9 @@ sg_ioctl_common(struct file *filp, struct sg_device *sdp, struct sg_fd *sfp,
 					GFP_KERNEL);
 			if (!rinfo)
 				return -ENOMEM;
-			read_lock_irqsave(&sfp->rq_list_lock, iflags);
+			spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 			sg_fill_request_table(sfp, rinfo);
-			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+			spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 	#ifdef CONFIG_COMPAT
 			if (in_compat_syscall())
 				result = put_compat_request_table(p, rinfo);
@@ -1509,7 +1509,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
 	srp->rq = NULL;
 	blk_mq_free_request(rq);
 
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (unlikely(srp->orphan)) {
 		if (sfp->keep_orphan)
 			srp->sg_io_owned = 0;
@@ -1517,7 +1517,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
 			done = 0;
 	}
 	srp->done = done;
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 
 	if (likely(done)) {
 		/* Now wake up any sg_read() that is waiting for this
@@ -2198,7 +2198,7 @@ sg_setup_req(struct sg_fd *sfp)
 	unsigned long iflags;
 	struct sg_request *rp = sfp->req_arr;
 
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (!list_empty(&sfp->rq_list)) {
 		if (!sfp->cmd_q)
 			goto out_unlock;
@@ -2214,10 +2214,10 @@ sg_setup_req(struct sg_fd *sfp)
 	rp->parentfp = sfp;
 	rp->header.duration = jiffies_to_msecs(jiffies);
 	list_add_tail(&rp->entry, &sfp->rq_list);
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 	return rp;
 out_unlock:
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 	return NULL;
 }
 
@@ -2230,13 +2230,13 @@ sg_remove_request(struct sg_fd *sfp, struct sg_request *srp)
 
 	if (!sfp || !srp || list_empty(&sfp->rq_list))
 		return res;
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (!list_empty(&srp->entry)) {
 		list_del(&srp->entry);
 		srp->parentfp = NULL;
 		res = 1;
 	}
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 	return res;
 }
 
@@ -2252,7 +2252,7 @@ sg_add_sfp(struct sg_device *sdp)
 		return ERR_PTR(-ENOMEM);
 
 	init_waitqueue_head(&sfp->read_wait);
-	rwlock_init(&sfp->rq_list_lock);
+	spin_lock_init(&sfp->rq_list_lock);
 	INIT_LIST_HEAD(&sfp->rq_list);
 	kref_init(&sfp->f_ref);
 	mutex_init(&sfp->f_mutex);
@@ -2297,14 +2297,14 @@ sg_remove_sfp_usercontext(struct work_struct *work)
 	unsigned long iflags;
 
 	/* Cleanup any responses which were never read(). */
-	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+	spin_lock_irqsave(&sfp->rq_list_lock, iflags);
 	while (!list_empty(&sfp->rq_list)) {
 		srp = list_first_entry(&sfp->rq_list, struct sg_request, entry);
 		sg_finish_scsi_blk_rq(srp);
 		list_del(&srp->entry);
 		srp->parentfp = NULL;
 	}
-	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+	spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
 
 	if (sfp->reserve.buflen > 0) {
 		SG_LOG(6, sfp, "%s:    buflen=%d, num_sgat=%d\n", __func__,
@@ -2584,9 +2584,9 @@ sg_proc_seq_show_dev(struct seq_file *s, void *v)
 			      scsidp->host->host_no, scsidp->channel,
 			      scsidp->id, scsidp->lun, (int) scsidp->type,
 			      1,
-			      (int) scsidp->queue_depth,
-			      (int) scsi_device_busy(scsidp),
-			      (int) scsi_device_online(scsidp));
+			      (int)scsidp->queue_depth,
+			      (int)scsi_device_busy(scsidp),
+			      (int)scsi_device_online(scsidp));
 	}
 	read_unlock_irqrestore(&sg_index_lock, iflags);
 	return 0;
@@ -2626,7 +2626,7 @@ sg_proc_debug_helper(struct seq_file *s, struct sg_device *sdp)
 	k = 0;
 	list_for_each_entry(fp, &sdp->sfds, sfd_entry) {
 		k++;
-		read_lock(&fp->rq_list_lock); /* irqs already disabled */
+		spin_lock(&fp->rq_list_lock); /* irqs already disabled */
 		seq_printf(s, "   FD(%d): timeout=%dms buflen=%d "
 			   "(res)sgat=%d\n", k,
 			   jiffies_to_msecs(fp->timeout),
@@ -2675,7 +2675,7 @@ sg_proc_debug_helper(struct seq_file *s, struct sg_device *sdp)
 		}
 		if (list_empty(&fp->rq_list))
 			seq_puts(s, "     No requests active\n");
-		read_unlock(&fp->rq_list_lock);
+		spin_unlock(&fp->rq_list_lock);
 	}
 }
 
-- 
2.25.1


  parent reply	other threads:[~2022-04-11  2:29 UTC|newest]

Thread overview: 47+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-04-11  2:27 [PATCH v24 00/46] sg: add v4 interface Douglas Gilbert
2022-04-11  2:27 ` [PATCH v24 01/46] sg: move functions around Douglas Gilbert
2022-04-11  2:27 ` [PATCH v24 02/46] sg: remove typedefs, type+formatting cleanup Douglas Gilbert
2022-04-11  2:27 ` [PATCH v24 03/46] sg: sg_log and is_enabled Douglas Gilbert
2022-04-11  2:27 ` [PATCH v24 04/46] sg: rework sg_poll(), minor changes Douglas Gilbert
2022-04-11  2:27 ` [PATCH v24 05/46] sg: bitops in sg_device Douglas Gilbert
2022-04-11  2:27 ` [PATCH v24 06/46] sg: make open count an atomic Douglas Gilbert
2022-04-11  2:27 ` [PATCH v24 07/46] sg: move header to uapi section Douglas Gilbert
2022-04-11  2:27 ` [PATCH v24 08/46] sg: speed sg_poll and sg_get_num_waiting Douglas Gilbert
2022-04-11  2:27 ` [PATCH v24 09/46] sg: sg_allow_if_err_recovery and renames Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 10/46] sg: improve naming Douglas Gilbert
2022-04-11  2:28 ` Douglas Gilbert [this message]
2022-04-11  2:28 ` [PATCH v24 12/46] sg: ioctl handling Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 13/46] sg: split sg_read Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 14/46] sg: sg_common_write add structure for arguments Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 15/46] sg: rework sg_vma_fault Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 16/46] sg: rework sg_mmap Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 17/46] sg: replace sg_allow_access Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 18/46] sg: rework scatter gather handling Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 19/46] sg: introduce request state machine Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 20/46] sg: sg_find_srp_by_id Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 21/46] sg: sg_fill_request_element Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 22/46] sg: printk change %p to %pK Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 23/46] sg: xarray for fds in device Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 24/46] sg: xarray for reqs in fd Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 25/46] sg: replace rq array with xarray Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 26/46] sg: sense buffer rework Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 27/46] sg: add sg v4 interface support Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 28/46] sg: rework debug info Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 29/46] sg: add 8 byte SCSI LUN to sg_scsi_id Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 30/46] sg: expand sg_comm_wr_t Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 31/46] sg: add sg_iosubmit_v3 and sg_ioreceive_v3 ioctls Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 32/46] sg: add some __must_hold macros Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 33/46] sg: move procfs objects to avoid forward decls Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 34/46] sg: protect multiple receivers Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 35/46] sg: first debugfs support Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 36/46] sg: rework mmap support Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 37/46] sg: defang allow_dio Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 38/46] sg: warn v3 write system call users Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 39/46] sg: add mmap_sz tracking Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 40/46] sg: remove rcv_done request state Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 41/46] sg: track lowest inactive and await indexes Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 42/46] sg: remove unit attention check for device changed Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 43/46] sg: no_dxfer: move to/from kernel buffers Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 44/46] sg: add bio_poll support Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 45/46] sg: add statistics similar to st Douglas Gilbert
2022-04-11  2:28 ` [PATCH v24 46/46] sg: bump version to 4.0.13 Douglas Gilbert

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220411022836.11871-12-dgilbert@interlog.com \
    --to=dgilbert@interlog.com \
    --cc=bvanassche@acm.org \
    --cc=hare@suse.de \
    --cc=jejb@linux.vnet.ibm.com \
    --cc=linux-scsi@vger.kernel.org \
    --cc=martin.petersen@oracle.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox