From mboxrd@z Thu Jan 1 00:00:00 1970
From: Bjorn Andersson
Subject: [PATCH] rpmsg: glink: Use spinlock in tx path
Date: Tue, 13 Feb 2018 11:04:04 -0800
Message-ID: <20180213190404.25026-1-bjorn.andersson@linaro.org>
Return-path:
Received: from mail-pg0-f67.google.com ([74.125.83.67]:36752 "EHLO
        mail-pg0-f67.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
        with ESMTP id S1752400AbeBMTJc (ORCPT );
        Tue, 13 Feb 2018 14:09:32 -0500
Received: by mail-pg0-f67.google.com with SMTP id j9so522936pgv.3
        for ; Tue, 13 Feb 2018 11:09:32 -0800 (PST)
Sender: linux-arm-msm-owner@vger.kernel.org
List-Id: linux-arm-msm@vger.kernel.org
To: Ohad Ben-Cohen, Bjorn Andersson, Chris Lew, Arun Kumar Neelakantam,
        Srini Kandagatla
Cc: linux-remoteproc@vger.kernel.org, linux-arm-msm@vger.kernel.org,
        linux-kernel@vger.kernel.org

Switch the tx_lock to a spinlock so that clients can use rpmsg_trysend()
from atomic context. In order to allow clients to sleep while waiting for
space in the FIFO, the lock is released temporarily around the delay; this
delay should eventually be replaced by sending a READ_NOTIF and waiting
for the remote to signal us that space has been made available.

Signed-off-by: Bjorn Andersson
---
 drivers/rpmsg/qcom_glink_native.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index e0f31ed096a5..6e950e9afa82 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -113,7 +113,7 @@ struct qcom_glink {
 	spinlock_t rx_lock;
 	struct list_head rx_queue;
 
-	struct mutex tx_lock;
+	spinlock_t tx_lock;
 
 	spinlock_t idr_lock;
 	struct idr lcids;
@@ -288,15 +288,14 @@ static int qcom_glink_tx(struct qcom_glink *glink,
 			 const void *data, size_t dlen, bool wait)
 {
 	unsigned int tlen = hlen + dlen;
+	unsigned long flags;
 	int ret;
 
 	/* Reject packets that are too big */
 	if (tlen >= glink->tx_pipe->length)
 		return -EINVAL;
 
-	ret = mutex_lock_interruptible(&glink->tx_lock);
-	if (ret)
-		return ret;
+	spin_lock_irqsave(&glink->tx_lock, flags);
 
 	while (qcom_glink_tx_avail(glink) < tlen) {
 		if (!wait) {
@@ -304,7 +303,12 @@ static int qcom_glink_tx(struct qcom_glink *glink,
 			goto out;
 		}
 
+		/* Wait without holding the tx_lock */
+		spin_unlock_irqrestore(&glink->tx_lock, flags);
+
 		usleep_range(10000, 15000);
+
+		spin_lock_irqsave(&glink->tx_lock, flags);
 	}
 
 	qcom_glink_tx_write(glink, hdr, hlen, data, dlen);
@@ -313,7 +317,7 @@ static int qcom_glink_tx(struct qcom_glink *glink,
 	mbox_client_txdone(glink->mbox_chan, 0);
 
 out:
-	mutex_unlock(&glink->tx_lock);
+	spin_unlock_irqrestore(&glink->tx_lock, flags);
 
 	return ret;
 }
@@ -1567,7 +1571,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
 	glink->features = features;
 	glink->intentless = intentless;
 
-	mutex_init(&glink->tx_lock);
+	spin_lock_init(&glink->tx_lock);
 	spin_lock_init(&glink->rx_lock);
 	INIT_LIST_HEAD(&glink->rx_queue);
 	INIT_WORK(&glink->rx_work, qcom_glink_work);
-- 
2.15.0
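
Not part of the patch, but a minimal sketch of the kind of caller the commit
message has in mind: a client issuing a non-blocking send while it cannot
sleep. The driver name, its lock and my_drv_send_event() are made up for
illustration; rpmsg_trysend() is the existing non-blocking rpmsg send call
that the spinlock-based tx path is meant to make usable from atomic context.

/*
 * Illustrative only: hypothetical client sending from atomic context.
 * "my_drv" and its lock are invented names for this sketch.
 */
#include <linux/rpmsg.h>
#include <linux/spinlock.h>

struct my_drv {
	struct rpmsg_device *rpdev;
	spinlock_t lock;	/* protects driver state across the send */
};

static int my_drv_send_event(struct my_drv *drv, void *msg, int len)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&drv->lock, flags);
	/*
	 * Must not sleep here; rpmsg_trysend() returns an error instead
	 * of waiting when there is no room for the message.
	 */
	ret = rpmsg_trysend(drv->rpdev->ept, msg, len);
	spin_unlock_irqrestore(&drv->lock, flags);

	return ret;
}

This also explains why the patch brackets the usleep_range() with
spin_unlock_irqrestore()/spin_lock_irqsave(): with a spinlock, the blocking
(wait == true) path may not sleep while the lock is held.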