From: Steven Rostedt <rostedt@goodmis.org>
To: linux-kernel@vger.kernel.org,
linux-rt-users <linux-rt-users@vger.kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>,
Carsten Emde <C.Emde@osadl.org>,
Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
John Kacur <jkacur@redhat.com>,
Paul Gortmaker <paul.gortmaker@windriver.com>,
Julia Cartwright <julia@ni.com>,
Daniel Wagner <daniel.wagner@siemens.com>,
tom.zanussi@linux.intel.com
Subject: [PATCH RT 08/24] Revert "x86: UV: raw_spinlock conversion"
Date: Fri, 07 Sep 2018 16:59:05 -0400
Message-ID: <20180907205930.950515188@goodmis.org>
In-Reply-To: <20180907205857.262840394@goodmis.org>
4.14.63-rt41-rc2 stable review patch.
If anyone has any objections, please let me know.
------------------
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
[ Upstream commit 2a9c45d8f89112458364285cbe2b0729561953f1 ]
Drop the Ultraviolet (UV) patch. UV looks broken upstream for PREEMPT, too.
Mike is the only person I know who has such a machine, and he isn't going
to fix this upstream (from 1526977462.6491.1.camel@gmx.de):
|From: Mike Galbraith <gleep@gmx.de>
|On Tue, 2018-05-22 at 08:50 +0200, Sebastian Andrzej Siewior wrote:
|>
|> Regarding the preempt_disable() in the original patch in uv_read_rtc():
|> This looks essential for PREEMPT configs. Is it possible to get this
|> tested by someone or else get rid of the UV code? It looks broken for
|> "uv_get_min_hub_revision_id() != 1".
|
|I suspect SGI cares not one whit about PREEMPT.
|
|> Why does PREEMPT_RT require migrate_disable() while plain PREEMPT is
|> fine as-is? This does not look right.
|
|UV is not OK with a PREEMPT config; it's just that for RT it's dirt
|simple to shut it up, whereas for PREEMPT, preempt_disable() across
|uv_bau_init() doesn't cut it due to allocations, and whatever else I
|would have met before ending the whack-a-mole game.
|
|If I were in your shoes, I think I'd just stop caring about UV until a
|real user appears. AFAIK, I'm the only guy who ever ran RT on UV, and
|I only did so because SUSE asked me to look into it... years ago now.
|
| -Mike
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
arch/x86/include/asm/uv/uv_bau.h | 14 +++++++-------
arch/x86/platform/uv/tlb_uv.c | 26 +++++++++++++-------------
arch/x86/platform/uv/uv_time.c | 20 ++++++++------------
3 files changed, 28 insertions(+), 32 deletions(-)
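
For background on what this revert undoes: on PREEMPT_RT, spinlock_t is
backed by a sleeping rt_mutex, so it must not be taken from hard-IRQ or
otherwise atomic context, while raw_spinlock_t keeps the mainline
busy-wait semantics on every config. The original conversion had switched
the UV locks to the raw variant to avoid might-sleep splats on RT;
reverting it restores the sleeping locks. A minimal sketch of the
distinction (illustrative kernel C, not part of the patch; the lock names
are made up):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(hard_lock);	/* spins on all configs */
static DEFINE_SPINLOCK(soft_lock);	/* sleeping lock on PREEMPT_RT */

static void lock_sketch(void)
{
	unsigned long flags;

	/* Safe in any context, even with IRQs off on RT: never sleeps. */
	raw_spin_lock_irqsave(&hard_lock, flags);
	raw_spin_unlock_irqrestore(&hard_lock, flags);

	/*
	 * On PREEMPT_RT this may block, so it is only valid from
	 * preemptible context; this is the constraint the original
	 * raw_spinlock conversion was working around.
	 */
	spin_lock(&soft_lock);
	spin_unlock(&soft_lock);
}
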
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 2ac6e347bdc5..7cac79802ad2 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -643,9 +643,9 @@ struct bau_control {
cycles_t send_message;
cycles_t period_end;
cycles_t period_time;
- raw_spinlock_t uvhub_lock;
- raw_spinlock_t queue_lock;
- raw_spinlock_t disable_lock;
+ spinlock_t uvhub_lock;
+ spinlock_t queue_lock;
+ spinlock_t disable_lock;
/* tunables */
int max_concurr;
int max_concurr_const;
@@ -847,15 +847,15 @@ static inline int atom_asr(short i, struct atomic_short *v)
* to be lowered below the current 'v'. atomic_add_unless can only stop
* on equal.
*/
-static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
+static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
{
- raw_spin_lock(lock);
+ spin_lock(lock);
if (atomic_read(v) >= u) {
- raw_spin_unlock(lock);
+ spin_unlock(lock);
return 0;
}
atomic_inc(v);
- raw_spin_unlock(lock);
+ spin_unlock(lock);
return 1;
}
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 5607611df740..34f9a9ce6236 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -740,9 +740,9 @@ static void destination_plugged(struct bau_desc *bau_desc,
quiesce_local_uvhub(hmaster);
- raw_spin_lock(&hmaster->queue_lock);
+ spin_lock(&hmaster->queue_lock);
reset_with_ipi(&bau_desc->distribution, bcp);
- raw_spin_unlock(&hmaster->queue_lock);
+ spin_unlock(&hmaster->queue_lock);
end_uvhub_quiesce(hmaster);
@@ -762,9 +762,9 @@ static void destination_timeout(struct bau_desc *bau_desc,
quiesce_local_uvhub(hmaster);
- raw_spin_lock(&hmaster->queue_lock);
+ spin_lock(&hmaster->queue_lock);
reset_with_ipi(&bau_desc->distribution, bcp);
- raw_spin_unlock(&hmaster->queue_lock);
+ spin_unlock(&hmaster->queue_lock);
end_uvhub_quiesce(hmaster);
@@ -785,7 +785,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
cycles_t tm1;
hmaster = bcp->uvhub_master;
- raw_spin_lock(&hmaster->disable_lock);
+ spin_lock(&hmaster->disable_lock);
if (!bcp->baudisabled) {
stat->s_bau_disabled++;
tm1 = get_cycles();
@@ -798,7 +798,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
}
}
}
- raw_spin_unlock(&hmaster->disable_lock);
+ spin_unlock(&hmaster->disable_lock);
}
static void count_max_concurr(int stat, struct bau_control *bcp,
@@ -861,7 +861,7 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
*/
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
- raw_spinlock_t *lock = &hmaster->uvhub_lock;
+ spinlock_t *lock = &hmaster->uvhub_lock;
atomic_t *v;
v = &hmaster->active_descriptor_count;
@@ -995,7 +995,7 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
struct bau_control *hmaster;
hmaster = bcp->uvhub_master;
- raw_spin_lock(&hmaster->disable_lock);
+ spin_lock(&hmaster->disable_lock);
if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
stat->s_bau_reenabled++;
for_each_present_cpu(tcpu) {
@@ -1007,10 +1007,10 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
tbcp->period_giveups = 0;
}
}
- raw_spin_unlock(&hmaster->disable_lock);
+ spin_unlock(&hmaster->disable_lock);
return 0;
}
- raw_spin_unlock(&hmaster->disable_lock);
+ spin_unlock(&hmaster->disable_lock);
return -1;
}
@@ -1942,9 +1942,9 @@ static void __init init_per_cpu_tunables(void)
bcp->cong_reps = congested_reps;
bcp->disabled_period = sec_2_cycles(disabled_period);
bcp->giveup_limit = giveup_limit;
- raw_spin_lock_init(&bcp->queue_lock);
- raw_spin_lock_init(&bcp->uvhub_lock);
- raw_spin_lock_init(&bcp->disable_lock);
+ spin_lock_init(&bcp->queue_lock);
+ spin_lock_init(&bcp->uvhub_lock);
+ spin_lock_init(&bcp->disable_lock);
}
}
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index badf377efc21..b082d71b08ee 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -57,7 +57,7 @@ static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
/* There is one of these allocated per node */
struct uv_rtc_timer_head {
- raw_spinlock_t lock;
+ spinlock_t lock;
/* next cpu waiting for timer, local node relative: */
int next_cpu;
/* number of cpus on this node: */
@@ -177,7 +177,7 @@ static __init int uv_rtc_allocate_timers(void)
uv_rtc_deallocate_timers();
return -ENOMEM;
}
- raw_spin_lock_init(&head->lock);
+ spin_lock_init(&head->lock);
head->ncpus = uv_blade_nr_possible_cpus(bid);
head->next_cpu = -1;
blade_info[bid] = head;
@@ -231,7 +231,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
unsigned long flags;
int next_cpu;
- raw_spin_lock_irqsave(&head->lock, flags);
+ spin_lock_irqsave(&head->lock, flags);
next_cpu = head->next_cpu;
*t = expires;
@@ -243,12 +243,12 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
if (uv_setup_intr(cpu, expires)) {
*t = ULLONG_MAX;
uv_rtc_find_next_timer(head, pnode);
- raw_spin_unlock_irqrestore(&head->lock, flags);
+ spin_unlock_irqrestore(&head->lock, flags);
return -ETIME;
}
}
- raw_spin_unlock_irqrestore(&head->lock, flags);
+ spin_unlock_irqrestore(&head->lock, flags);
return 0;
}
@@ -267,7 +267,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
unsigned long flags;
int rc = 0;
- raw_spin_lock_irqsave(&head->lock, flags);
+ spin_lock_irqsave(&head->lock, flags);
if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
rc = 1;
@@ -279,7 +279,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
uv_rtc_find_next_timer(head, pnode);
}
- raw_spin_unlock_irqrestore(&head->lock, flags);
+ spin_unlock_irqrestore(&head->lock, flags);
return rc;
}
@@ -299,17 +299,13 @@ static int uv_rtc_unset_timer(int cpu, int force)
static u64 uv_read_rtc(struct clocksource *cs)
{
unsigned long offset;
- u64 cycles;
- preempt_disable();
if (uv_get_min_hub_revision_id() == 1)
offset = 0;
else
offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
- cycles = (u64)uv_read_local_mmr(UVH_RTC | offset);
- preempt_enable();
- return cycles;
+ return (u64)uv_read_local_mmr(UVH_RTC | offset);
}
/*
--
2.18.0
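
For reference, the dropped uv_read_rtc() hunk was about CPU migration:
the MMR offset depends on which CPU performs the read, so the CPU id must
not change between computing the offset and issuing the access. A minimal
sketch of that pattern (hypothetical helper, not UV code; 'base' stands
in for the local MMR mapping), which the revert removes again, leaving
the preemption race Sebastian points out unfixed upstream:

#include <linux/cache.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/smp.h>

/*
 * Hypothetical stand-in for the protected uv_read_rtc(): if the task
 * migrated between computing 'offset' and the readq(), it would read
 * another CPU's slot; the preempt_disable() pair prevents that.
 */
static u64 read_rtc_like(void __iomem *base)
{
	unsigned long offset;
	u64 cycles;

	preempt_disable();
	offset = (smp_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
	cycles = readq(base + offset);
	preempt_enable();

	return cycles;
}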