From: Colin Cross <ccross@android.com>
To: linux-tegra@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org, olof@lixom.net,
konkers@android.com, Colin Cross <ccross@android.com>,
Russell King <linux@arm.linux.org.uk>,
linux-kernel@vger.kernel.org
Subject: [PATCH v2 22/28] ARM: tegra: dma: Fix critical data corruption bugs
Date: Sun, 23 Jan 2011 18:01:27 -0800
Message-ID: <1295834493-5019-23-git-send-email-ccross@android.com>
In-Reply-To: <1295834493-5019-1-git-send-email-ccross@android.com>

Sometimes, due to high interrupt latency in the continuous mode of DMA
transfer, the half-buffer-complete interrupt is handled only after DMA
has already transferred the full buffer. When this is detected, stop
DMA immediately and restart with the next buffer if the next buffer is
ready.

Originally fixed by Victor (Weiguo) Pan <wpan@nvidia.com>.
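
Roughly, the out-of-sync detection added to handle_continuous_dma()
below comes down to the following condensed sketch (completion of the
current request and reprogramming of the next buffer are omitted; the
variable name ping_done is illustrative, the driver uses
is_dma_ping_complete):

        /* Compare the ping-pong status bit against the buffer half we
         * expected to have just completed.  A mismatch means the
         * half-buffer interrupt was serviced so late that the full
         * buffer has already been transferred. */
        bool ping_done = !!(readl(ch->addr + APB_DMA_CHAN_STA) &
                            STA_PING_PONG);

        if (req->to_memory)
                ping_done = !ping_done;

        if (!ping_done) {
                /* Out of sync: release the current buffer as full, stop
                 * the channel, and restart with the next queued request
                 * if one is ready. */
                req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
                tegra_dma_stop(ch);
        }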

Use spin_lock_irqsave()/spin_unlock_irqrestore() instead of plain
spin_lock()/spin_unlock() in the interrupt thread, because the lock is
shared between normal process context and interrupt context.

Originally fixed by Laxman Dewangan <ldewangan@nvidia.com>.
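
This is the standard pattern for a spinlock that is also taken from
interrupt context; a minimal, driver-independent sketch (the lock and
function names here are illustrative, not from this driver):

        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(example_lock);  /* also taken from IRQ context */

        static void example_update_shared_state(void)
        {
                unsigned long flags;

                /* spin_lock_irqsave() disables local interrupts while the
                 * lock is held, so an interrupt on this CPU cannot try to
                 * take the same lock and deadlock against us. */
                spin_lock_irqsave(&example_lock, flags);
                /* ... touch state shared with the interrupt handler ... */
                spin_unlock_irqrestore(&example_lock, flags);
        }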

The use of shadow registers caused memory corruption at physical
address 0 because the enable bit was not shadowed, and assuming it
needed to be set would enable an unconfigured DMA block. Most of the
register accesses don't need to know the previous state of the
registers, and the few places that do need to modify only a few bits
are the same ones that were sometimes incorrectly setting the enable
bit. This patch converts tegra_dma_update_hw to write the entire
register, converts the other users to read-modify-write, and drops the
shadow registers completely.
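
As an illustration, the tegra_dma_cancel() hunk in this patch moves
from shadow-based writes to read-modify-write of the live register,
roughly:

        /* Before: build the value from the software shadow.  CSR_ENB was
         * never shadowed, so it was set unconditionally -- enabling the
         * channel even if it had never been configured. */
        csr = ch->csr;
        csr &= ~CSR_REQ_SEL_MASK;
        csr |= CSR_REQ_SEL_INVALID;
        csr |= CSR_ENB;         /* "set the enable as that is not shadowed" */
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        /* After: read back the live register and modify only the
         * request-select field; the enable bit keeps whatever state the
         * hardware already has. */
        csr = readl(ch->addr + APB_DMA_CHAN_CSR);
        csr &= ~CSR_REQ_SEL_MASK;
        csr |= CSR_REQ_SEL_INVALID;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);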

Also fixes missing locking in tegra_dma_allocate_channel().
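
For completeness, the allocation path after this patch is roughly the
following (condensed from the diff; the TEGRA_DMA_SHARED special case
is omitted):

        static DEFINE_MUTEX(tegra_dma_lock);

        struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
        {
                struct tegra_dma_channel *ch = NULL;
                int channel;

                /* The mutex serializes the bitmap search and set, so two
                 * callers can no longer be handed the same channel. */
                mutex_lock(&tegra_dma_lock);
                channel = find_first_zero_bit(channel_usage,
                                              ARRAY_SIZE(dma_channels));
                if (channel >= ARRAY_SIZE(dma_channels))
                        goto out;
                __set_bit(channel, channel_usage);
                ch = &dma_channels[channel];
                ch->mode = mode;
        out:
                mutex_unlock(&tegra_dma_lock);
                return ch;
        }
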
Signed-off-by: Colin Cross <ccross@android.com>
---
arch/arm/mach-tegra/dma.c | 197 +++++++++++++++++++++++++--------------------
1 files changed, 108 insertions(+), 89 deletions(-)
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index a2a252d..250bc7b 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -121,17 +121,13 @@ struct tegra_dma_channel {
void __iomem *addr;
int mode;
int irq;
-
- /* Register shadow */
- u32 csr;
- u32 ahb_seq;
- u32 ahb_ptr;
- u32 apb_seq;
- u32 apb_ptr;
+ int req_transfer_count;
};
#define NV_DMA_MAX_CHANNELS 32
+static DEFINE_MUTEX(tegra_dma_lock);
+
static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
@@ -139,7 +135,6 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
struct tegra_dma_req *req);
-static void tegra_dma_init_hw(struct tegra_dma_channel *ch);
static void tegra_dma_stop(struct tegra_dma_channel *ch);
void tegra_dma_flush(struct tegra_dma_channel *ch)
@@ -151,6 +146,9 @@ void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
struct tegra_dma_req *req;
+ if (tegra_dma_is_empty(ch))
+ return;
+
req = list_entry(ch->list.next, typeof(*req), node);
tegra_dma_dequeue_req(ch, req);
@@ -159,10 +157,10 @@ void tegra_dma_dequeue(struct tegra_dma_channel *ch)
void tegra_dma_stop(struct tegra_dma_channel *ch)
{
- unsigned int csr;
- unsigned int status;
+ u32 csr;
+ u32 status;
- csr = ch->csr;
+ csr = readl(ch->addr + APB_DMA_CHAN_CSR);
csr &= ~CSR_IE_EOC;
writel(csr, ch->addr + APB_DMA_CHAN_CSR);
@@ -176,19 +174,16 @@ void tegra_dma_stop(struct tegra_dma_channel *ch)
int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
- unsigned int csr;
+ u32 csr;
unsigned long irq_flags;
spin_lock_irqsave(&ch->lock, irq_flags);
while (!list_empty(&ch->list))
list_del(ch->list.next);
- csr = ch->csr;
+ csr = readl(ch->addr + APB_DMA_CHAN_CSR);
csr &= ~CSR_REQ_SEL_MASK;
csr |= CSR_REQ_SEL_INVALID;
-
- /* Set the enable as that is not shadowed */
- csr |= CSR_ENB;
writel(csr, ch->addr + APB_DMA_CHAN_CSR);
tegra_dma_stop(ch);
@@ -230,18 +225,15 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
* - Finally stop or program the DMA to the next buffer in the
* list.
*/
- csr = ch->csr;
+ csr = readl(ch->addr + APB_DMA_CHAN_CSR);
csr &= ~CSR_REQ_SEL_MASK;
csr |= CSR_REQ_SEL_INVALID;
-
- /* Set the enable as that is not shadowed */
- csr |= CSR_ENB;
writel(csr, ch->addr + APB_DMA_CHAN_CSR);
/* Get the transfer count */
status = readl(ch->addr + APB_DMA_CHAN_STA);
to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
- req_transfer_count = (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+ req_transfer_count = ch->req_transfer_count;
req_transfer_count += 1;
to_transfer += 1;
@@ -349,7 +341,9 @@ EXPORT_SYMBOL(tegra_dma_enqueue_req);
struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
int channel;
- struct tegra_dma_channel *ch;
+ struct tegra_dma_channel *ch = NULL;
+
+ mutex_lock(&tegra_dma_lock);
/* first channel is the shared channel */
if (mode & TEGRA_DMA_SHARED) {
@@ -358,11 +352,14 @@ struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
channel = find_first_zero_bit(channel_usage,
ARRAY_SIZE(dma_channels));
if (channel >= ARRAY_SIZE(dma_channels))
- return NULL;
+ goto out;
}
__set_bit(channel, channel_usage);
ch = &dma_channels[channel];
ch->mode = mode;
+
+out:
+ mutex_unlock(&tegra_dma_lock);
return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);
@@ -372,22 +369,27 @@ void tegra_dma_free_channel(struct tegra_dma_channel *ch)
if (ch->mode & TEGRA_DMA_SHARED)
return;
tegra_dma_cancel(ch);
+ mutex_lock(&tegra_dma_lock);
__clear_bit(ch->id, channel_usage);
+ mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
struct tegra_dma_req *req)
{
+ u32 apb_ptr;
+ u32 ahb_ptr;
+
if (req->to_memory) {
- ch->apb_ptr = req->source_addr;
- ch->ahb_ptr = req->dest_addr;
+ apb_ptr = req->source_addr;
+ ahb_ptr = req->dest_addr;
} else {
- ch->apb_ptr = req->dest_addr;
- ch->ahb_ptr = req->source_addr;
+ apb_ptr = req->dest_addr;
+ ahb_ptr = req->source_addr;
}
- writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
- writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
+ writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
+ writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
req->status = TEGRA_DMA_REQ_INFLIGHT;
return;
@@ -401,38 +403,39 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
int ahb_bus_width;
int apb_bus_width;
int index;
- unsigned long csr;
+ u32 ahb_seq;
+ u32 apb_seq;
+ u32 ahb_ptr;
+ u32 apb_ptr;
+ u32 csr;
+
+ csr = CSR_IE_EOC | CSR_FLOW;
+ ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
+ apb_seq = 0;
- ch->csr |= CSR_FLOW;
- ch->csr &= ~CSR_REQ_SEL_MASK;
- ch->csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
- ch->ahb_seq &= ~AHB_SEQ_BURST_MASK;
- ch->ahb_seq |= AHB_SEQ_BURST_1;
+ csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
/* One shot mode is always single buffered,
* continuous mode is always double buffered
* */
if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
- ch->csr |= CSR_ONCE;
- ch->ahb_seq &= ~AHB_SEQ_DBL_BUF;
- ch->csr &= ~CSR_WCOUNT_MASK;
- ch->csr |= ((req->size>>2) - 1) << CSR_WCOUNT_SHIFT;
+ csr |= CSR_ONCE;
+ ch->req_transfer_count = (req->size >> 2) - 1;
} else {
- ch->csr &= ~CSR_ONCE;
- ch->ahb_seq |= AHB_SEQ_DBL_BUF;
+ ahb_seq |= AHB_SEQ_DBL_BUF;
/* In double buffered mode, we set the size to half the
* requested size and interrupt when half the buffer
* is full */
- ch->csr &= ~CSR_WCOUNT_MASK;
- ch->csr |= ((req->size>>3) - 1) << CSR_WCOUNT_SHIFT;
+ ch->req_transfer_count = (req->size >> 3) - 1;
}
+ csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;
+
if (req->to_memory) {
- ch->csr &= ~CSR_DIR;
- ch->apb_ptr = req->source_addr;
- ch->ahb_ptr = req->dest_addr;
+ apb_ptr = req->source_addr;
+ ahb_ptr = req->dest_addr;
apb_addr_wrap = req->source_wrap;
ahb_addr_wrap = req->dest_wrap;
@@ -440,9 +443,9 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
ahb_bus_width = req->dest_bus_width;
} else {
- ch->csr |= CSR_DIR;
- ch->apb_ptr = req->dest_addr;
- ch->ahb_ptr = req->source_addr;
+ csr |= CSR_DIR;
+ apb_ptr = req->dest_addr;
+ ahb_ptr = req->source_addr;
apb_addr_wrap = req->dest_wrap;
ahb_addr_wrap = req->source_wrap;
@@ -461,8 +464,7 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
index++;
} while (index < ARRAY_SIZE(apb_addr_wrap_table));
BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
- ch->apb_seq &= ~APB_SEQ_WRAP_MASK;
- ch->apb_seq |= index << APB_SEQ_WRAP_SHIFT;
+ apb_seq |= index << APB_SEQ_WRAP_SHIFT;
/* set address wrap for AHB size */
index = 0;
@@ -472,55 +474,42 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
index++;
} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
- ch->ahb_seq &= ~AHB_SEQ_WRAP_MASK;
- ch->ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
+ ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
if (bus_width_table[index] == ahb_bus_width)
break;
}
BUG_ON(index == ARRAY_SIZE(bus_width_table));
- ch->ahb_seq &= ~AHB_SEQ_BUS_WIDTH_MASK;
- ch->ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
+ ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
if (bus_width_table[index] == apb_bus_width)
break;
}
BUG_ON(index == ARRAY_SIZE(bus_width_table));
- ch->apb_seq &= ~APB_SEQ_BUS_WIDTH_MASK;
- ch->apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
-
- ch->csr |= CSR_IE_EOC;
+ apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
- /* update hw registers with the shadow */
- writel(ch->csr, ch->addr + APB_DMA_CHAN_CSR);
- writel(ch->apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
- writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
- writel(ch->ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
- writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
+ writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+ writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
+ writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
+ writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
+ writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
- csr = ch->csr | CSR_ENB;
+ csr |= CSR_ENB;
writel(csr, ch->addr + APB_DMA_CHAN_CSR);
req->status = TEGRA_DMA_REQ_INFLIGHT;
}
-static void tegra_dma_init_hw(struct tegra_dma_channel *ch)
-{
- /* One shot with an interrupt to CPU after transfer */
- ch->csr = CSR_ONCE | CSR_IE_EOC;
- ch->ahb_seq = AHB_SEQ_BUS_WIDTH_32 | AHB_SEQ_INTR_ENB;
- ch->apb_seq = APB_SEQ_BUS_WIDTH_32 | 1 << APB_SEQ_WRAP_SHIFT;
-}
-
static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
struct tegra_dma_req *req;
+ unsigned long irq_flags;
- spin_lock(&ch->lock);
+ spin_lock_irqsave(&ch->lock, irq_flags);
if (list_empty(&ch->list)) {
- spin_unlock(&ch->lock);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
@@ -528,8 +517,7 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
if (req) {
int bytes_transferred;
- bytes_transferred =
- (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+ bytes_transferred = ch->req_transfer_count;
bytes_transferred += 1;
bytes_transferred <<= 2;
@@ -537,12 +525,12 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
req->bytes_transferred = bytes_transferred;
req->status = TEGRA_DMA_REQ_SUCCESS;
- spin_unlock(&ch->lock);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
/* Callback should be called without any lock */
pr_debug("%s: transferred %d bytes\n", __func__,
req->bytes_transferred);
req->complete(req);
- spin_lock(&ch->lock);
+ spin_lock_irqsave(&ch->lock, irq_flags);
}
if (!list_empty(&ch->list)) {
@@ -552,22 +540,55 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
if (req->status != TEGRA_DMA_REQ_INFLIGHT)
tegra_dma_update_hw(ch, req);
}
- spin_unlock(&ch->lock);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
}
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
struct tegra_dma_req *req;
+ unsigned long irq_flags;
- spin_lock(&ch->lock);
+ spin_lock_irqsave(&ch->lock, irq_flags);
if (list_empty(&ch->list)) {
- spin_unlock(&ch->lock);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
req = list_entry(ch->list.next, typeof(*req), node);
if (req) {
if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
+ bool is_dma_ping_complete;
+ is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
+ & STA_PING_PONG) ? true : false;
+ if (req->to_memory)
+ is_dma_ping_complete = !is_dma_ping_complete;
+ /* Out of sync - Release current buffer */
+ if (!is_dma_ping_complete) {
+ int bytes_transferred;
+
+ bytes_transferred = ch->req_transfer_count;
+ bytes_transferred += 1;
+ bytes_transferred <<= 3;
+ req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
+ req->bytes_transferred = bytes_transferred;
+ req->status = TEGRA_DMA_REQ_SUCCESS;
+ tegra_dma_stop(ch);
+
+ if (!list_is_last(&req->node, &ch->list)) {
+ struct tegra_dma_req *next_req;
+
+ next_req = list_entry(req->node.next,
+ typeof(*next_req), node);
+ tegra_dma_update_hw(ch, next_req);
+ }
+
+ list_del(&req->node);
+
+ /* DMA lock is NOT held when callbak is called */
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ req->complete(req);
+ return;
+ }
/* Load the next request into the hardware, if available
* */
if (!list_is_last(&req->node, &ch->list)) {
@@ -580,7 +601,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
req->status = TEGRA_DMA_REQ_SUCCESS;
/* DMA lock is NOT held when callback is called */
- spin_unlock(&ch->lock);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
if (likely(req->threshold))
req->threshold(req);
return;
@@ -591,8 +612,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
* the second interrupt */
int bytes_transferred;
- bytes_transferred =
- (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+ bytes_transferred = ch->req_transfer_count;
bytes_transferred += 1;
bytes_transferred <<= 3;
@@ -602,7 +622,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
list_del(&req->node);
/* DMA lock is NOT held when callbak is called */
- spin_unlock(&ch->lock);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
req->complete(req);
return;
@@ -610,7 +630,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
BUG();
}
}
- spin_unlock(&ch->lock);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
}
static irqreturn_t dma_isr(int irq, void *data)
@@ -674,7 +694,6 @@ int __init tegra_dma_init(void)
spin_lock_init(&ch->lock);
INIT_LIST_HEAD(&ch->list);
- tegra_dma_init_hw(ch);
irq = INT_APB_DMA_CH0 + i;
ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
--
1.7.3.1