From: Feng Kan <fkan@apm.com>
To: Jassi Brar <jassisinghbrar@gmail.com>
Cc: devicetree@vger.kernel.org, patches <patches@apm.com>,
Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
"linux-arm-kernel@lists.infradead.org"
<linux-arm-kernel@lists.infradead.org>
Subject: Re: [PATCH 1/3] mailbox: add support for APM X-Gene platform mailbox driver
Date: Tue, 21 Apr 2015 14:14:32 -0700 [thread overview]
Message-ID: <CAL85gmC-rSPqmQ2nqs=MDQCjUUh30vSfo7vEMGOcijBia31q8g@mail.gmail.com> (raw)
In-Reply-To: <CABb+yY31bGbQ4xipyW9pe+J6QV3con=QFuasEfmpgNNfkKKPVA@mail.gmail.com>
On Tue, Apr 21, 2015 at 12:42 AM, Jassi Brar <jassisinghbrar@gmail.com> wrote:
> On Wed, Feb 18, 2015 at 8:40 AM, Feng Kan <fkan@apm.com> wrote:
>
>> +#define MBOX_CON_NAME "slimpro-mbox"
>> +#define MBOX_REG_SET_OFFSET 0x1000
>> +#define MBOX_CNT 8
>> +#define MBOX_STATUS_AVAIL_MASK 0x00010000
>> +#define MBOX_STATUS_ACK_MASK 0x00000001
>>
> Nit: BIT(16) and BIT(0) are more readable
will fix
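E.g. (untested; BIT() comes from <linux/bitops.h>):

#define MBOX_STATUS_AVAIL_MASK	BIT(16)	/* message pending in DIN */
#define MBOX_STATUS_ACK_MASK	BIT(0)	/* remote acked last DOUT */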
>
>> +/* Configuration and Status Registers */
>> +struct slimpro_mbox_reg {
>> + u32 in;
>> + u32 din0;
>> + u32 din1;
>> + u32 rsvd1;
>> + u32 out;
>> + u32 dout0;
>> + u32 dout1;
>> + u32 rsvd2;
>> + u32 status;
>> + u32 statusmask;
>> +};
>> +
> Why not the normal way of defining offset macros, like most drivers do?
I don't have a strong preference either way; let me know if you want me
to switch to offset defines, e.g. the sketch below.
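For comparison, the offset form would be roughly this (offsets derived
from the struct layout above, untested):

#define REG_DB_IN	0x00
#define REG_DB_DIN0	0x04
#define REG_DB_DIN1	0x08
#define REG_DB_OUT	0x10
#define REG_DB_DOUT0	0x14
#define REG_DB_DOUT1	0x18
#define REG_DB_STAT	0x20
#define REG_DB_STATMASK	0x24

with accesses written as readl(mb_chan->reg + REG_DB_STAT) once ->reg
becomes a plain void __iomem *.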
>
>> +struct slimpro_mbox_chan {
>> + struct device *dev;
>> + struct mbox_chan *chan;
>> + struct slimpro_mbox_reg __iomem *reg;
>> + int id;
>> + int irq;
>> + u32 rx_msg[3];
>> +};
>> +
>> +struct slimpro_mbox {
>> + struct mbox_controller mb_ctrl;
>> + struct slimpro_mbox_chan mc[MBOX_CNT];
>> + struct mbox_chan chans[MBOX_CNT];
>> +};
>> +
>> +static struct slimpro_mbox_chan *to_slimpro_mbox_chan(struct mbox_chan *chan)
>> +{
>> + if (!chan || !chan->con_priv)
>> + return NULL;
> This seems un-necessary. Anyway you don't care for NULL returned :)
> Probably just kill this function?
done
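With the helper gone, the callbacks just pick up con_priv directly:

	struct slimpro_mbox_chan *mb_chan = chan->con_priv;

(the core only invokes callbacks on channels the driver registered, so
no NULL check is needed there).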
>
>> +
>> + return (struct slimpro_mbox_chan *)chan->con_priv;
>> +}
>> +
>> +static void mb_chan_send_msg(struct slimpro_mbox_chan *mb_chan, u32 *msg)
>> +{
>> + writel(msg[1], &mb_chan->reg->dout0);
>> + writel(msg[2], &mb_chan->reg->dout1);
>> + writel(msg[0], &mb_chan->reg->out);
>> +}
>> +
>> +static void mb_chan_recv_msg(struct slimpro_mbox_chan *mb_chan)
>> +{
>> + mb_chan->rx_msg[1] = readl(&mb_chan->reg->din0);
>> + mb_chan->rx_msg[2] = readl(&mb_chan->reg->din1);
>> + mb_chan->rx_msg[0] = readl(&mb_chan->reg->in);
>> +}
>> +
> maybe move the send/recv function inline the caller?
done
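So slimpro_mbox_send_data() ends up as (sketch):

static int slimpro_mbox_send_data(struct mbox_chan *chan, void *data)
{
	struct slimpro_mbox_chan *mb_chan = chan->con_priv;
	u32 *msg = data;

	/* Write the payload first, then the command word that triggers */
	writel(msg[1], &mb_chan->reg->dout0);
	writel(msg[2], &mb_chan->reg->dout1);
	writel(msg[0], &mb_chan->reg->out);
	return 0;
}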
>
>> +static void mb_chan_enable_int(struct slimpro_mbox_chan *mb_chan, u32 mask)
>> +{
>> + u32 val = readl(&mb_chan->reg->statusmask);
>> +
>> + val &= ~mask;
>> +
>> + writel(val, &mb_chan->reg->statusmask);
>> +}
>> +
>> +static void mb_chan_disable_int(struct slimpro_mbox_chan *mb_chan, u32 mask)
>> +{
>> + u32 val = readl(&mb_chan->reg->statusmask);
>> +
>> + val |= mask;
>> +
>> + writel(val, &mb_chan->reg->statusmask);
>> +}
>> +
>> +static int mb_chan_status_ack(struct slimpro_mbox_chan *mb_chan)
>> +{
>> + u32 val = readl(&mb_chan->reg->status);
>> +
>> + if (val & MBOX_STATUS_ACK_MASK) {
>> + writel(MBOX_STATUS_ACK_MASK, &mb_chan->reg->status);
>> + return 1;
>> + }
>> + return 0;
>> +}
>> +
>> +static int mb_chan_status_avail(struct slimpro_mbox_chan *mb_chan)
>> +{
>> + u32 val = readl(&mb_chan->reg->status);
>> +
>> + if (val & MBOX_STATUS_AVAIL_MASK) {
>> + mb_chan_recv_msg(mb_chan);
>> + writel(MBOX_STATUS_AVAIL_MASK, &mb_chan->reg->status);
>> + return 1;
>> + }
>> + return 0;
>> +}
>> +
>> +static irqreturn_t slimpro_mbox_irq(int irq, void *id)
>> +{
>> + struct slimpro_mbox_chan *mb_chan = id;
>> +
>> + if (mb_chan_status_ack(mb_chan))
>> + mbox_chan_txdone(mb_chan->chan, 0);
>> +
>> + if (mb_chan_status_avail(mb_chan)) {
>> + mb_chan_recv_msg(mb_chan);
>>
> you already did this in mb_chan_status_avail() is it needed?
removed
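Leaving the handler as (sketch):

static irqreturn_t slimpro_mbox_irq(int irq, void *id)
{
	struct slimpro_mbox_chan *mb_chan = id;

	if (mb_chan_status_ack(mb_chan))
		mbox_chan_txdone(mb_chan->chan, 0);

	/* mb_chan_status_avail() already latched the message in rx_msg */
	if (mb_chan_status_avail(mb_chan))
		mbox_chan_received_data(mb_chan->chan, mb_chan->rx_msg);

	return IRQ_HANDLED;
}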
>
>> + mbox_chan_received_data(mb_chan->chan, mb_chan->rx_msg);
>> + }
>> +
>> + return IRQ_HANDLED;
>> +}
>> +
>> +static int slimpro_mbox_send_data(struct mbox_chan *chan, void *msg)
>> +{
>> + struct slimpro_mbox_chan *mb_chan = to_slimpro_mbox_chan(chan);
>> +
>> + mb_chan_send_msg(mb_chan, msg);
>> + return 0;
>> +}
>> +
>> +static int slimpro_mbox_startup(struct mbox_chan *chan)
>> +{
>> + struct slimpro_mbox_chan *mb_chan = to_slimpro_mbox_chan(chan);
>> + int rc;
>> +
>> + rc = devm_request_irq(mb_chan->dev, mb_chan->irq, slimpro_mbox_irq, 0,
>> + MBOX_CON_NAME, mb_chan);
>>
> You may want to use IRQF_SHARED flag here and make slimpro_mbox_irq()
> aware of that -- some platforms tie together irq lines of all
> instances of a resource, like dma, mbox, so they may share the same
> irq line.
This is an internal dedicated IRQ line.
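For reference, if the line ever had to be shared, the handler would
read the status once up front and return IRQ_NONE when neither bit is
set, roughly (untested):

static irqreturn_t slimpro_mbox_irq(int irq, void *id)
{
	struct slimpro_mbox_chan *mb_chan = id;
	u32 val = readl(&mb_chan->reg->status);

	/* On a shared line, tell the core when the IRQ isn't ours */
	if (!(val & (MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK)))
		return IRQ_NONE;

	if (mb_chan_status_ack(mb_chan))
		mbox_chan_txdone(mb_chan->chan, 0);

	if (mb_chan_status_avail(mb_chan))
		mbox_chan_received_data(mb_chan->chan, mb_chan->rx_msg);

	return IRQ_HANDLED;
}

plus IRQF_SHARED in the devm_request_irq() call. Not needed here,
though.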
>
>
>> +static int __init slimpro_mbox_probe(struct platform_device *pdev)
>> +{
>> + struct slimpro_mbox *ctx;
>> + struct resource *regs;
>> + void __iomem *mb_base;
>> + int rc;
>> + int i;
>> +
>> + ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL);
>> + if (!ctx)
>> + return -ENOMEM;
>> +
>> + platform_set_drvdata(pdev, ctx);
>> +
>> + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
>> + mb_base = devm_ioremap_resource(&pdev->dev, regs);
>> + if (IS_ERR(mb_base))
>> + return PTR_ERR(mb_base);
>> +
>> + /* Setup mailbox links */
>> + for (i = 0; i < MBOX_CNT; i++) {
>> + ctx->mc[i].irq = platform_get_irq(pdev, i);
>> + if (ctx->mc[i].irq < 0) {
>> + dev_err(&pdev->dev, "no IRQ at index %d\n", i);
>> + return -ENODEV;
>> + }
>> +
>> + ctx->mc[i].dev = &pdev->dev;
>> + ctx->mc[i].reg = mb_base + i * MBOX_REG_SET_OFFSET;
>> + ctx->mc[i].id = i;
>> + ctx->mc[i].chan = &ctx->chans[i];
>> + ctx->chans[i].con_priv = &ctx->mc[i];
>>
> Note to self: Maybe we should make it possible to populate a channel
> during request/of_xlate.
>
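A controller-specific of_xlate can already pick the channel (and could
fill in con_priv) at request time. A minimal sketch, assuming a
one-cell mboxes specifier carrying the channel index (untested):

static struct mbox_chan *slimpro_mbox_xlate(struct mbox_controller *mbox,
				const struct of_phandle_args *sp)
{
	struct slimpro_mbox *ctx =
			container_of(mbox, struct slimpro_mbox, mb_ctrl);
	int ind = sp->args[0];

	if (ind >= MBOX_CNT)
		return NULL;

	/* con_priv could just as well be populated here instead of probe */
	ctx->chans[ind].con_priv = &ctx->mc[ind];
	return &ctx->chans[ind];
}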
>> +
>> +static int __init slimpro_mbox_init(void)
>> +{
>> + return platform_driver_register(&slimpro_mbox_driver);
>> +}
>> +
>> +static void __exit slimpro_mbox_exit(void)
>> +{
>> +}
>> +
>> +subsys_initcall(slimpro_mbox_init);
>> +module_exit(slimpro_mbox_exit);
> Why empty module_exit?
will remove
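FWIW, if the subsys_initcall ordering isn't actually required, the
whole init/exit boilerplate could shrink to the usual one-liner (the
__init on probe would have to go too):

module_platform_driver(slimpro_mbox_driver);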
>
> regards.