* [PATCH v3 0/5] rv: LTL per-cpu monitor type and real-time scheduling monitor
@ 2025-08-11 8:40 Nam Cao
2025-08-11 8:40 ` [PATCH v3 1/5] rv/ltl: Prepare for other monitor types Nam Cao
` (4 more replies)
0 siblings, 5 replies; 7+ messages in thread
From: Nam Cao @ 2025-08-11 8:40 UTC (permalink / raw)
To: Steven Rostedt, Masami Hiramatsu, Mathieu Desnoyers,
Gabriele Monaco, linux-trace-kernel, linux-kernel
Cc: Nam Cao
Hi,
This series adds support for a linear temporal logic (LTL) per-cpu monitor
type, analogous to the deterministic automaton per-cpu monitors. A new
per-cpu monitor which validates real-time scheduling is then added on top.
Nam Cao (5):
rv/ltl: Prepare for other monitor types
rv/ltl: Support per-cpu monitors
verification/rvgen/ltl: Support per-cpu monitor generation
sched: Add task enqueue/dequeue trace points
rv: Add rts monitor
Documentation/trace/rv/monitor_rtapp.rst | 19 +++
include/linux/rv.h | 4 +
include/rv/ltl_monitor.h | 115 +++++++++-----
include/trace/events/sched.h | 13 ++
kernel/sched/core.c | 8 +-
kernel/trace/rv/Kconfig | 5 +
kernel/trace/rv/Makefile | 1 +
.../trace/rv/monitors/pagefault/pagefault.h | 2 +
kernel/trace/rv/monitors/rts/Kconfig | 17 +++
kernel/trace/rv/monitors/rts/rts.c | 144 ++++++++++++++++++
kernel/trace/rv/monitors/rts/rts.h | 126 +++++++++++++++
kernel/trace/rv/monitors/rts/rts_trace.h | 15 ++
kernel/trace/rv/monitors/sleep/sleep.h | 2 +
kernel/trace/rv/rv_trace.h | 47 ++++++
tools/verification/models/rtapp/rts.ltl | 5 +
tools/verification/rvgen/rvgen/ltl2k.py | 48 +++++-
.../rvgen/rvgen/templates/ltl2k/main.c | 9 +-
.../rvgen/rvgen/templates/ltl2k/trace.h | 7 +-
18 files changed, 538 insertions(+), 49 deletions(-)
create mode 100644 kernel/trace/rv/monitors/rts/Kconfig
create mode 100644 kernel/trace/rv/monitors/rts/rts.c
create mode 100644 kernel/trace/rv/monitors/rts/rts.h
create mode 100644 kernel/trace/rv/monitors/rts/rts_trace.h
create mode 100644 tools/verification/models/rtapp/rts.ltl
--
2.39.5
* [PATCH v3 1/5] rv/ltl: Prepare for other monitor types
2025-08-11 8:40 [PATCH v3 0/5] rv: LTL per-cpu monitor type and real-time scheduling monitor Nam Cao
@ 2025-08-11 8:40 ` Nam Cao
2025-08-11 8:40 ` [PATCH v3 2/5] rv/ltl: Support per-cpu monitors Nam Cao
` (3 subsequent siblings)
4 siblings, 0 replies; 7+ messages in thread
From: Nam Cao @ 2025-08-11 8:40 UTC (permalink / raw)
To: Steven Rostedt, Masami Hiramatsu, Mathieu Desnoyers,
Gabriele Monaco, linux-trace-kernel, linux-kernel
Cc: Nam Cao
rv/ltl_monitor.h is the template file used by all LTL monitors, but only
the per-task monitor type is supported so far.
Prepare to support other monitor types:
- Change the monitored target type into an opaque type which will be
defined differently depending on the monitor type. This type is only
defined as struct task_struct * for now.
- Separate out the per-task-specific printf format and arguments, so that
rv_cond_react() can be shared with other monitor types.
No functional change intended.
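As an illustration of the extension point (sketch only; an actual per-cpu
variant is added later in this series), a new monitor type only needs to
supply its own target typedef and print macros:

#define TARGET_PRINT_FORMAT "%u"
#define TARGET_PRINT_ARGS(cpu) cpu

typedef unsigned int monitor_target;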
Reviewed-by: Gabriele Monaco <gmonaco@redhat.com>
Signed-off-by: Nam Cao <namcao@linutronix.de>
---
v2: define the macros for DA usage as well
---
include/linux/rv.h | 4 +
include/rv/ltl_monitor.h | 85 +++++++++++--------
.../trace/rv/monitors/pagefault/pagefault.h | 2 +
kernel/trace/rv/monitors/sleep/sleep.h | 2 +
4 files changed, 58 insertions(+), 35 deletions(-)
diff --git a/include/linux/rv.h b/include/linux/rv.h
index 14410a42faef..10a8be730d89 100644
--- a/include/linux/rv.h
+++ b/include/linux/rv.h
@@ -13,6 +13,10 @@
#define MAX_DA_NAME_LEN 32
#define MAX_DA_RETRY_RACING_EVENTS 3
+#define RV_MON_GLOBAL 0
+#define RV_MON_PER_CPU 1
+#define RV_MON_PER_TASK 2
+
#ifdef CONFIG_RV
#include <linux/bitops.h>
#include <linux/types.h>
diff --git a/include/rv/ltl_monitor.h b/include/rv/ltl_monitor.h
index 67031a774e3d..9dabc5b133a3 100644
--- a/include/rv/ltl_monitor.h
+++ b/include/rv/ltl_monitor.h
@@ -16,49 +16,63 @@
#error "Please include $(MODEL_NAME).h generated by rvgen"
#endif
+#if LTL_MONITOR_TYPE == RV_MON_PER_TASK
+
+#define TARGET_PRINT_FORMAT "%s[%d]"
+#define TARGET_PRINT_ARGS(task) task->comm, task->pid
+
+typedef struct task_struct *monitor_target;
+
+#endif
+
#ifdef CONFIG_RV_REACTORS
#define RV_MONITOR_NAME CONCATENATE(rv_, MONITOR_NAME)
static struct rv_monitor RV_MONITOR_NAME;
-static void rv_cond_react(struct task_struct *task)
+static void rv_cond_react(monitor_target target)
{
if (!rv_reacting_on() || !RV_MONITOR_NAME.react)
return;
- RV_MONITOR_NAME.react("rv: "__stringify(MONITOR_NAME)": %s[%d]: violation detected\n",
- task->comm, task->pid);
+
+ RV_MONITOR_NAME.react(
+ "rv: "__stringify(MONITOR_NAME)": "TARGET_PRINT_FORMAT": violation detected\n",
+ TARGET_PRINT_ARGS(target));
}
#else
-static void rv_cond_react(struct task_struct *task)
+static void rv_cond_react(monitor_target target)
{
}
#endif
-static int ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
+static void ltl_atoms_fetch(monitor_target target, struct ltl_monitor *mon);
+static void ltl_atoms_init(monitor_target target, struct ltl_monitor *mon, bool target_creation);
-static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon);
-static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation);
+#if LTL_MONITOR_TYPE == RV_MON_PER_TASK
+static int ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
-static struct ltl_monitor *ltl_get_monitor(struct task_struct *task)
+static struct ltl_monitor *ltl_get_monitor(monitor_target target)
{
- return &task->rv[ltl_monitor_slot].ltl_mon;
+ return &target->rv[ltl_monitor_slot].ltl_mon;
}
+#endif
-static void ltl_task_init(struct task_struct *task, bool task_creation)
+static void ltl_target_init(monitor_target target, bool target_creation)
{
- struct ltl_monitor *mon = ltl_get_monitor(task);
+ struct ltl_monitor *mon = ltl_get_monitor(target);
memset(&mon->states, 0, sizeof(mon->states));
for (int i = 0; i < LTL_NUM_ATOM; ++i)
__set_bit(i, mon->unknown_atoms);
- ltl_atoms_init(task, mon, task_creation);
- ltl_atoms_fetch(task, mon);
+ ltl_atoms_init(target, mon, target_creation);
+ ltl_atoms_fetch(target, mon);
}
+#if LTL_MONITOR_TYPE == RV_MON_PER_TASK
static void handle_task_newtask(void *data, struct task_struct *task, unsigned long flags)
{
- ltl_task_init(task, true);
+ ltl_target_init(task, true);
}
static int ltl_monitor_init(void)
@@ -77,10 +91,10 @@ static int ltl_monitor_init(void)
read_lock(&tasklist_lock);
for_each_process_thread(g, p)
- ltl_task_init(p, false);
+ ltl_target_init(p, false);
for_each_present_cpu(cpu)
- ltl_task_init(idle_task(cpu), false);
+ ltl_target_init(idle_task(cpu), false);
read_unlock(&tasklist_lock);
@@ -94,17 +108,18 @@ static void ltl_monitor_destroy(void)
rv_put_task_monitor_slot(ltl_monitor_slot);
ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
}
+#endif
-static void ltl_illegal_state(struct task_struct *task, struct ltl_monitor *mon)
+static void ltl_illegal_state(monitor_target target, struct ltl_monitor *mon)
{
- CONCATENATE(trace_error_, MONITOR_NAME)(task);
- rv_cond_react(task);
+ CONCATENATE(trace_error_, MONITOR_NAME)(target);
+ rv_cond_react(target);
}
-static void ltl_attempt_start(struct task_struct *task, struct ltl_monitor *mon)
+static void ltl_attempt_start(monitor_target target, struct ltl_monitor *mon)
{
if (rv_ltl_all_atoms_known(mon))
- ltl_start(task, mon);
+ ltl_start(target, mon);
}
static inline void ltl_atom_set(struct ltl_monitor *mon, enum ltl_atom atom, bool value)
@@ -117,7 +132,7 @@ static inline void ltl_atom_set(struct ltl_monitor *mon, enum ltl_atom atom, boo
}
static void
-ltl_trace_event(struct task_struct *task, struct ltl_monitor *mon, unsigned long *next_state)
+ltl_trace_event(monitor_target target, struct ltl_monitor *mon, unsigned long *next_state)
{
const char *format_str = "%s";
DECLARE_SEQ_BUF(atoms, 64);
@@ -137,10 +152,10 @@ ltl_trace_event(struct task_struct *task, struct ltl_monitor *mon, unsigned long
}
}
- CONCATENATE(trace_event_, MONITOR_NAME)(task, states, atoms.buffer, next);
+ CONCATENATE(trace_event_, MONITOR_NAME)(target, states, atoms.buffer, next);
}
-static void ltl_validate(struct task_struct *task, struct ltl_monitor *mon)
+static void ltl_validate(monitor_target target, struct ltl_monitor *mon)
{
DECLARE_BITMAP(next_states, RV_MAX_BA_STATES) = {0};
@@ -152,35 +167,35 @@ static void ltl_validate(struct task_struct *task, struct ltl_monitor *mon)
ltl_possible_next_states(mon, i, next_states);
}
- ltl_trace_event(task, mon, next_states);
+ ltl_trace_event(target, mon, next_states);
memcpy(mon->states, next_states, sizeof(next_states));
if (!rv_ltl_valid_state(mon))
- ltl_illegal_state(task, mon);
+ ltl_illegal_state(target, mon);
}
-static void ltl_atom_update(struct task_struct *task, enum ltl_atom atom, bool value)
+static void ltl_atom_update(monitor_target target, enum ltl_atom atom, bool value)
{
- struct ltl_monitor *mon = ltl_get_monitor(task);
+ struct ltl_monitor *mon = ltl_get_monitor(target);
ltl_atom_set(mon, atom, value);
- ltl_atoms_fetch(task, mon);
+ ltl_atoms_fetch(target, mon);
if (!rv_ltl_valid_state(mon)) {
- ltl_attempt_start(task, mon);
+ ltl_attempt_start(target, mon);
return;
}
- ltl_validate(task, mon);
+ ltl_validate(target, mon);
}
-static void __maybe_unused ltl_atom_pulse(struct task_struct *task, enum ltl_atom atom, bool value)
+static void __maybe_unused ltl_atom_pulse(monitor_target target, enum ltl_atom atom, bool value)
{
- struct ltl_monitor *mon = ltl_get_monitor(task);
+ struct ltl_monitor *mon = ltl_get_monitor(target);
- ltl_atom_update(task, atom, value);
+ ltl_atom_update(target, atom, value);
ltl_atom_set(mon, atom, !value);
- ltl_validate(task, mon);
+ ltl_validate(target, mon);
}
diff --git a/kernel/trace/rv/monitors/pagefault/pagefault.h b/kernel/trace/rv/monitors/pagefault/pagefault.h
index c580ec194009..d449af84c24c 100644
--- a/kernel/trace/rv/monitors/pagefault/pagefault.h
+++ b/kernel/trace/rv/monitors/pagefault/pagefault.h
@@ -11,6 +11,8 @@
#define MONITOR_NAME pagefault
+#define LTL_MONITOR_TYPE RV_MON_PER_TASK
+
enum ltl_atom {
LTL_PAGEFAULT,
LTL_RT,
diff --git a/kernel/trace/rv/monitors/sleep/sleep.h b/kernel/trace/rv/monitors/sleep/sleep.h
index 2ab46fd218d2..0c87875d9040 100644
--- a/kernel/trace/rv/monitors/sleep/sleep.h
+++ b/kernel/trace/rv/monitors/sleep/sleep.h
@@ -11,6 +11,8 @@
#define MONITOR_NAME sleep
+#define LTL_MONITOR_TYPE RV_MON_PER_TASK
+
enum ltl_atom {
LTL_ABORT_SLEEP,
LTL_BLOCK_ON_RT_MUTEX,
--
2.39.5
* [PATCH v3 2/5] rv/ltl: Support per-cpu monitors
2025-08-11 8:40 [PATCH v3 0/5] rv: LTL per-cpu monitor type and real-time scheduling monitor Nam Cao
2025-08-11 8:40 ` [PATCH v3 1/5] rv/ltl: Prepare for other monitor types Nam Cao
@ 2025-08-11 8:40 ` Nam Cao
2025-08-11 8:40 ` [PATCH v3 3/5] verification/rvgen/ltl: Support per-cpu monitor generation Nam Cao
` (2 subsequent siblings)
4 siblings, 0 replies; 7+ messages in thread
From: Nam Cao @ 2025-08-11 8:40 UTC (permalink / raw)
To: Steven Rostedt, Masami Hiramatsu, Mathieu Desnoyers,
Gabriele Monaco, linux-trace-kernel, linux-kernel
Cc: Nam Cao
Add support for per-cpu linear temporal logic runtime verification
monitors, analogous to the existing deterministic automaton per-cpu
monitors.
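For illustration (the event and atom names below are hypothetical, not part
of this patch), a per-cpu monitor's tracepoint handler passes the CPU number
as the monitored target instead of a task pointer:

static void handle_example_event(void *data, unsigned int cpu)
{
        /* with LTL_MONITOR_TYPE == RV_MON_PER_CPU, the target is the CPU id */
        ltl_atom_update(cpu, LTL_EXAMPLE_ATOM, true);
}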
Reviewed-by: Gabriele Monaco <gmonaco@redhat.com>
Signed-off-by: Nam Cao <namcao@linutronix.de>
---
v3: fixup build issue on sh4
v2: Rename "implicit" to "cpu"
---
include/rv/ltl_monitor.h | 32 ++++++++++++++++++++++++++
kernel/trace/rv/Kconfig | 4 ++++
kernel/trace/rv/rv_trace.h | 46 ++++++++++++++++++++++++++++++++++++++
3 files changed, 82 insertions(+)
diff --git a/include/rv/ltl_monitor.h b/include/rv/ltl_monitor.h
index 9dabc5b133a3..5c0197a59db0 100644
--- a/include/rv/ltl_monitor.h
+++ b/include/rv/ltl_monitor.h
@@ -23,12 +23,21 @@
typedef struct task_struct *monitor_target;
+#elif LTL_MONITOR_TYPE == RV_MON_PER_CPU
+
+#define TARGET_PRINT_FORMAT "%u"
+#define TARGET_PRINT_ARGS(cpu) cpu
+
+typedef unsigned int monitor_target;
+
#endif
#ifdef CONFIG_RV_REACTORS
#define RV_MONITOR_NAME CONCATENATE(rv_, MONITOR_NAME)
static struct rv_monitor RV_MONITOR_NAME;
+static struct ltl_monitor *ltl_get_monitor(monitor_target target);
+
static void rv_cond_react(monitor_target target)
{
if (!rv_reacting_on() || !RV_MONITOR_NAME.react)
@@ -54,6 +63,13 @@ static struct ltl_monitor *ltl_get_monitor(monitor_target target)
{
return &target->rv[ltl_monitor_slot].ltl_mon;
}
+#elif LTL_MONITOR_TYPE == RV_MON_PER_CPU
+static DEFINE_PER_CPU(struct ltl_monitor, ltl_monitor);
+
+static struct ltl_monitor *ltl_get_monitor(unsigned int cpu)
+{
+ return per_cpu_ptr(<l_monitor, cpu);
+}
#endif
static void ltl_target_init(monitor_target target, bool target_creation)
@@ -108,6 +124,22 @@ static void ltl_monitor_destroy(void)
rv_put_task_monitor_slot(ltl_monitor_slot);
ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
}
+
+#elif LTL_MONITOR_TYPE == RV_MON_PER_CPU
+
+static int ltl_monitor_init(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ ltl_target_init(cpu, false);
+ return 0;
+}
+
+static void ltl_monitor_destroy(void)
+{
+}
+
#endif
static void ltl_illegal_state(monitor_target target, struct ltl_monitor *mon)
diff --git a/kernel/trace/rv/Kconfig b/kernel/trace/rv/Kconfig
index 5b4be87ba59d..7ef89006ed50 100644
--- a/kernel/trace/rv/Kconfig
+++ b/kernel/trace/rv/Kconfig
@@ -16,6 +16,10 @@ config DA_MON_EVENTS_ID
select RV_MON_MAINTENANCE_EVENTS
bool
+config LTL_MON_EVENTS_CPU
+ select RV_MON_EVENTS
+ bool
+
config LTL_MON_EVENTS_ID
select RV_MON_EVENTS
bool
diff --git a/kernel/trace/rv/rv_trace.h b/kernel/trace/rv/rv_trace.h
index 4a6faddac614..bf7cca6579ec 100644
--- a/kernel/trace/rv/rv_trace.h
+++ b/kernel/trace/rv/rv_trace.h
@@ -177,8 +177,54 @@ DECLARE_EVENT_CLASS(error_ltl_monitor_id,
#include <monitors/pagefault/pagefault_trace.h>
#include <monitors/sleep/sleep_trace.h>
// Add new monitors based on CONFIG_LTL_MON_EVENTS_ID here
+
#endif /* CONFIG_LTL_MON_EVENTS_ID */
+#ifdef CONFIG_LTL_MON_EVENTS_CPU
+DECLARE_EVENT_CLASS(event_ltl_monitor_cpu,
+
+ TP_PROTO(unsigned int cpu, char *states, char *atoms, char *next),
+
+ TP_ARGS(cpu, states, atoms, next),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu)
+ __string(states, states)
+ __string(atoms, atoms)
+ __string(next, next)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __assign_str(states);
+ __assign_str(atoms);
+ __assign_str(next);
+ ),
+
+ TP_printk("cpu%u: (%s) x (%s) -> (%s)", __entry->cpu,
+ __get_str(states), __get_str(atoms), __get_str(next))
+);
+
+DECLARE_EVENT_CLASS(error_ltl_monitor_cpu,
+
+ TP_PROTO(unsigned int cpu),
+
+ TP_ARGS(cpu),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ ),
+
+ TP_printk("cpu%u: violation detected", __entry->cpu)
+);
+// Add new monitors based on CONFIG_LTL_MON_EVENTS_CPU here
+
+#endif /* CONFIG_LTL_MON_EVENTS_CPU */
+
#ifdef CONFIG_RV_MON_MAINTENANCE_EVENTS
/* Tracepoint useful for monitors development, currenly only used in DA */
TRACE_EVENT(rv_retries_error,
--
2.39.5
* [PATCH v3 3/5] verification/rvgen/ltl: Support per-cpu monitor generation
2025-08-11 8:40 [PATCH v3 0/5] rv: LTL per-cpu monitor type and real-time scheduling monitor Nam Cao
2025-08-11 8:40 ` [PATCH v3 1/5] rv/ltl: Prepare for other monitor types Nam Cao
2025-08-11 8:40 ` [PATCH v3 2/5] rv/ltl: Support per-cpu monitors Nam Cao
@ 2025-08-11 8:40 ` Nam Cao
2025-08-11 8:40 ` [PATCH v3 4/5] sched: Add task enqueue/dequeue trace points Nam Cao
2025-08-11 8:40 ` [PATCH v3 5/5] rv: Add rts monitor Nam Cao
4 siblings, 0 replies; 7+ messages in thread
From: Nam Cao @ 2025-08-11 8:40 UTC (permalink / raw)
To: Steven Rostedt, Masami Hiramatsu, Mathieu Desnoyers,
Gabriele Monaco, linux-trace-kernel, linux-kernel
Cc: Nam Cao
Add support for generating per-cpu LTL monitors. As with per-cpu monitors
generated from .dot files, the "-t per_cpu" parameter can be used to
generate per-cpu monitors from .ltl files.
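For a per_cpu model, the generated header selects the new monitor type and
the generated handler skeletons take the CPU as the monitored target,
roughly as in the sketch below (the atom name is illustrative):

#define LTL_MONITOR_TYPE RV_MON_PER_CPU

static void handle_example_event(void *data, /* XXX: fill header */)
{
        ltl_atom_update(cpu, LTL_EXAMPLE, true/false);
}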
Reviewed-by: Gabriele Monaco <gmonaco@redhat.com>
Signed-off-by: Nam Cao <namcao@linutronix.de>
---
v3: re-write the description in ltl_atoms_init() to be more precise
v2: Rename "implicit" to "cpu"
---
tools/verification/rvgen/rvgen/ltl2k.py | 48 ++++++++++++++++---
.../rvgen/rvgen/templates/ltl2k/main.c | 9 ++--
.../rvgen/rvgen/templates/ltl2k/trace.h | 7 ++-
3 files changed, 50 insertions(+), 14 deletions(-)
diff --git a/tools/verification/rvgen/rvgen/ltl2k.py b/tools/verification/rvgen/rvgen/ltl2k.py
index b075f98d50c4..f291d1f03d05 100644
--- a/tools/verification/rvgen/rvgen/ltl2k.py
+++ b/tools/verification/rvgen/rvgen/ltl2k.py
@@ -57,9 +57,16 @@ class ltl2k(generator.Monitor):
template_dir = "ltl2k"
def __init__(self, file_path, MonitorType, extra_params={}):
- if MonitorType != "per_task":
- raise NotImplementedError("Only per_task monitor is supported for LTL")
+ if MonitorType == "per_task":
+ self._target_arg = "struct task_struct *task"
+ self._target = "task"
+ elif MonitorType == "per_cpu":
+ self._target_arg = "unsigned int cpu"
+ self._target = "cpu"
+ else:
+ raise NotImplementedError(f"LTL does not support monitor type {MonitorType}")
super().__init__(extra_params)
+ self.monitor_type = MonitorType
with open(file_path) as f:
self.atoms, self.ba, self.ltl = ltl2ba.create_graph(f.read())
self.atoms_abbr = abbreviate_atoms(self.atoms)
@@ -67,6 +74,13 @@ class ltl2k(generator.Monitor):
if not self.name:
self.name = Path(file_path).stem
+ def _fill_monitor_type(self) -> str:
+ if self.monitor_type == "per_task":
+ return "#define LTL_MONITOR_TYPE RV_MON_PER_TASK"
+ if self.monitor_type == "per_cpu":
+ return "#define LTL_MONITOR_TYPE RV_MON_PER_CPU"
+ assert False
+
def _fill_states(self) -> str:
buf = [
"enum ltl_buchi_state {",
@@ -174,7 +188,7 @@ class ltl2k(generator.Monitor):
def _fill_start(self):
buf = [
- "static void ltl_start(struct task_struct *task, struct ltl_monitor *mon)",
+ "static void ltl_start(%s, struct ltl_monitor *mon)" % self._target_arg,
"{"
]
@@ -205,7 +219,7 @@ class ltl2k(generator.Monitor):
buff = []
buff.append("static void handle_example_event(void *data, /* XXX: fill header */)")
buff.append("{")
- buff.append("\tltl_atom_update(task, LTL_%s, true/false);" % self.atoms[0])
+ buff.append("\tltl_atom_update(%s, LTL_%s, true/false);" % (self._target, self.atoms[0]))
buff.append("}")
buff.append("")
return '\n'.join(buff)
@@ -241,6 +255,9 @@ class ltl2k(generator.Monitor):
""
]
+ buf.append(self._fill_monitor_type())
+ buf.append('')
+
buf.extend(self._fill_atoms())
buf.append('')
@@ -259,13 +276,32 @@ class ltl2k(generator.Monitor):
return '\n'.join(buf)
def fill_monitor_class_type(self):
- return "LTL_MON_EVENTS_ID"
+ if self.monitor_type == "per_task":
+ return "LTL_MON_EVENTS_ID"
+ if self.monitor_type == "per_cpu":
+ return "LTL_MON_EVENTS_CPU"
+ assert False
def fill_monitor_class(self):
- return "ltl_monitor_id"
+ if self.monitor_type == "per_task":
+ return "ltl_monitor_id"
+ if self.monitor_type == "per_cpu":
+ return "ltl_monitor_cpu"
+ assert False
+
+ def fill_tracepoint_args_skel(self, tp_type):
+ if tp_type == "event":
+ return \
+ ("\tTP_PROTO(%s, char *states, char *atoms, char *next),\n" % self._target_arg) + \
+ ("\tTP_ARGS(%s, states, atoms, next)" % self._target)
+ if tp_type == "error":
+ return \
+ ("\tTP_PROTO(%s),\n" % self._target_arg) + \
+ ("\tTP_ARGS(%s)" % self._target)
def fill_main_c(self):
main_c = super().fill_main_c()
main_c = main_c.replace("%%ATOMS_INIT%%", self.fill_atoms_init())
+ main_c = main_c.replace("%%TARGET_ARG%%", self._target_arg)
return main_c
diff --git a/tools/verification/rvgen/rvgen/templates/ltl2k/main.c b/tools/verification/rvgen/rvgen/templates/ltl2k/main.c
index f85d076fbf78..c6b51a04c360 100644
--- a/tools/verification/rvgen/rvgen/templates/ltl2k/main.c
+++ b/tools/verification/rvgen/rvgen/templates/ltl2k/main.c
@@ -23,7 +23,7 @@
#include "%%MODEL_NAME%%.h"
#include <rv/ltl_monitor.h>
-static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon)
+static void ltl_atoms_fetch(%%TARGET_ARG%%, struct ltl_monitor *mon)
{
/*
* This is called everytime the Buchi automaton is triggered.
@@ -36,13 +36,14 @@ static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon)
*/
}
-static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation)
+static void ltl_atoms_init(%%TARGET_ARG%%, struct ltl_monitor *mon, bool target_creation)
{
/*
* This should initialize as many atomic propositions as possible.
*
- * @task_creation indicates whether the task is being created. This is
- * false if the task is already running before the monitor is enabled.
+ * @target_creation indicates whether the monitored target is being
+ * created. This is false if the monitored target already exists before
+ * the monitor is enabled.
*/
%%ATOMS_INIT%%
}
diff --git a/tools/verification/rvgen/rvgen/templates/ltl2k/trace.h b/tools/verification/rvgen/rvgen/templates/ltl2k/trace.h
index 49394c4b0f1c..87d3a1308926 100644
--- a/tools/verification/rvgen/rvgen/templates/ltl2k/trace.h
+++ b/tools/verification/rvgen/rvgen/templates/ltl2k/trace.h
@@ -6,9 +6,8 @@
#ifdef CONFIG_RV_MON_%%MODEL_NAME_UP%%
DEFINE_EVENT(event_%%MONITOR_CLASS%%, event_%%MODEL_NAME%%,
- TP_PROTO(struct task_struct *task, char *states, char *atoms, char *next),
- TP_ARGS(task, states, atoms, next));
+%%TRACEPOINT_ARGS_SKEL_EVENT%%);
+
DEFINE_EVENT(error_%%MONITOR_CLASS%%, error_%%MODEL_NAME%%,
- TP_PROTO(struct task_struct *task),
- TP_ARGS(task));
+%%TRACEPOINT_ARGS_SKEL_ERROR%%);
#endif /* CONFIG_RV_MON_%%MODEL_NAME_UP%% */
--
2.39.5
* [PATCH v3 4/5] sched: Add task enqueue/dequeue trace points
2025-08-11 8:40 [PATCH v3 0/5] rv: LTL per-cpu monitor type and real-time scheduling monitor Nam Cao
` (2 preceding siblings ...)
2025-08-11 8:40 ` [PATCH v3 3/5] verification/rvgen/ltl: Support per-cpu monitor generation Nam Cao
@ 2025-08-11 8:40 ` Nam Cao
2025-08-13 6:50 ` Gabriele Monaco
2025-08-11 8:40 ` [PATCH v3 5/5] rv: Add rts monitor Nam Cao
4 siblings, 1 reply; 7+ messages in thread
From: Nam Cao @ 2025-08-11 8:40 UTC (permalink / raw)
To: Steven Rostedt, Masami Hiramatsu, Mathieu Desnoyers,
Gabriele Monaco, linux-trace-kernel, linux-kernel
Cc: Nam Cao, Ingo Molnar, Peter Zijlstra, Juri Lelli, Vincent Guittot,
Dietmar Eggemann, Ben Segall, Mel Gorman, Valentin Schneider,
K Prateek Nayak
Add trace points to enqueue_task() and dequeue_task(). They are useful for
implementing an RV monitor which validates real-time scheduling.
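For illustration only (the monitor name and handler below are hypothetical),
an RV monitor hooks these tracepoints with the usual rv_attach_trace_probe()
helper:

static void probe_enqueue(void *data, int cpu, struct task_struct *task)
{
        /* @task has just been enqueued on @cpu's runqueue */
}

static int enable_example(void)
{
        rv_attach_trace_probe("example", enqueue_task_tp, probe_enqueue);
        return 0;
}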
Signed-off-by: Nam Cao <namcao@linutronix.de>
---
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Ben Segall <bsegall@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
---
v3: fix up build issue on !SMP
v2: Move the tracepoints to cover all task enqueue/dequeue, not just RT
---
include/trace/events/sched.h | 13 +++++++++++++
kernel/sched/core.c | 8 +++++++-
2 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 7b2645b50e78..696d22af5a98 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -896,6 +896,19 @@ DECLARE_TRACE(sched_set_need_resched,
TP_PROTO(struct task_struct *tsk, int cpu, int tif),
TP_ARGS(tsk, cpu, tif));
+/*
+ * The two trace points below may not work as expected for fair tasks due
+ * to delayed dequeue. See:
+ * https://lore.kernel.org/lkml/179674c6-f82a-4718-ace2-67b5e672fdee@amd.com/
+ */
+DECLARE_TRACE(enqueue_task,
+ TP_PROTO(int cpu, struct task_struct *task),
+ TP_ARGS(cpu, task));
+
+DECLARE_TRACE(dequeue_task,
+ TP_PROTO(int cpu, struct task_struct *task),
+ TP_ARGS(cpu, task));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index be00629f0ba4..6367799aa023 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2077,6 +2077,8 @@ unsigned long get_wchan(struct task_struct *p)
void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
+ trace_enqueue_task_tp(cpu_of(rq), p);
+
if (!(flags & ENQUEUE_NOCLOCK))
update_rq_clock(rq);
@@ -2119,7 +2121,11 @@ inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
* and mark the task ->sched_delayed.
*/
uclamp_rq_dec(rq, p);
- return p->sched_class->dequeue_task(rq, p, flags);
+ if (p->sched_class->dequeue_task(rq, p, flags)) {
+ trace_dequeue_task_tp(cpu_of(rq), p);
+ return true;
+ }
+ return false;
}
void activate_task(struct rq *rq, struct task_struct *p, int flags)
--
2.39.5
* [PATCH v3 5/5] rv: Add rts monitor
2025-08-11 8:40 [PATCH v3 0/5] rv: LTL per-cpu monitor type and real-time scheduling monitor Nam Cao
` (3 preceding siblings ...)
2025-08-11 8:40 ` [PATCH v3 4/5] sched: Add task enqueue/dequeue trace points Nam Cao
@ 2025-08-11 8:40 ` Nam Cao
4 siblings, 0 replies; 7+ messages in thread
From: Nam Cao @ 2025-08-11 8:40 UTC (permalink / raw)
To: Steven Rostedt, Masami Hiramatsu, Mathieu Desnoyers,
Gabriele Monaco, linux-trace-kernel, linux-kernel
Cc: Nam Cao, Ingo Molnar, Peter Zijlstra, Juri Lelli, Vincent Guittot,
Dietmar Eggemann, Ben Segall, Mel Gorman, Valentin Schneider,
K Prateek Nayak
Add "real-time scheduling" monitor, which validates that SCHED_RR and
SCHED_FIFO tasks are scheduled before tasks with normal and extensible
scheduling policies
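With CONFIG_RV_MON_RTS enabled, the monitor can be switched on at run time
through the RV tracefs interface, e.g. by writing 1 to
rv/monitors/rtapp/rts/enable under the tracefs mount point (the path assumes
the nested layout used by the rtapp container monitor).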
Reviewed-by: Gabriele Monaco <gmonaco@redhat.com>
Signed-off-by: Nam Cao <namcao@linutronix.de>
---
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Ben Segall <bsegall@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
---
v3:
- move document file and specification file to be under rtapp
v2:
- use the new tracepoints
- move to be under the rtapp container monitor
- re-generate with the modified scripts
- fixup incorrect enqueued status
---
Documentation/trace/rv/monitor_rtapp.rst | 19 +++
kernel/trace/rv/Kconfig | 1 +
kernel/trace/rv/Makefile | 1 +
kernel/trace/rv/monitors/rts/Kconfig | 17 +++
kernel/trace/rv/monitors/rts/rts.c | 144 +++++++++++++++++++++++
kernel/trace/rv/monitors/rts/rts.h | 126 ++++++++++++++++++++
kernel/trace/rv/monitors/rts/rts_trace.h | 15 +++
kernel/trace/rv/rv_trace.h | 1 +
tools/verification/models/rtapp/rts.ltl | 5 +
9 files changed, 329 insertions(+)
create mode 100644 kernel/trace/rv/monitors/rts/Kconfig
create mode 100644 kernel/trace/rv/monitors/rts/rts.c
create mode 100644 kernel/trace/rv/monitors/rts/rts.h
create mode 100644 kernel/trace/rv/monitors/rts/rts_trace.h
create mode 100644 tools/verification/models/rtapp/rts.ltl
diff --git a/Documentation/trace/rv/monitor_rtapp.rst b/Documentation/trace/rv/monitor_rtapp.rst
index c8104eda924a..40001ace5612 100644
--- a/Documentation/trace/rv/monitor_rtapp.rst
+++ b/Documentation/trace/rv/monitor_rtapp.rst
@@ -131,3 +131,22 @@ special cases:
real-time-safe because preemption is disabled for the duration.
- `FUTEX_LOCK_PI` is included in the allowlist for the same reason as
`BLOCK_ON_RT_MUTEX`.
+
+Monitor rts
++++++++++++
+
+The real-time scheduling monitor validates that tasks with real-time scheduling
+policies (`SCHED_FIFO` and `SCHED_RR`) are always scheduled before tasks with
+normal and extensible scheduling policies (`SCHED_OTHER`, `SCHED_BATCH`,
+`SCHED_IDLE`, `SCHED_EXT`):
+
+.. literalinclude:: ../../../tools/verification/models/rtapp/rts.ltl
+
+Note that this monitor may report errors if real-time throttling or the fair
+deadline server is enabled. These mechanisms prevent real-time tasks from
+monopolizing the CPU by giving tasks with normal and extensible scheduling
+policies a chance to run. They give system administrators a chance to kill a
+misbehaving real-time task. However, they violate the scheduling priorities
+and may cause latency for well-behaved real-time tasks. Thus, if you see
+errors from this monitor, consider disabling real-time throttling and the
+fair deadline server.
diff --git a/kernel/trace/rv/Kconfig b/kernel/trace/rv/Kconfig
index 7ef89006ed50..e9007ed32aea 100644
--- a/kernel/trace/rv/Kconfig
+++ b/kernel/trace/rv/Kconfig
@@ -67,6 +67,7 @@ source "kernel/trace/rv/monitors/opid/Kconfig"
source "kernel/trace/rv/monitors/rtapp/Kconfig"
source "kernel/trace/rv/monitors/pagefault/Kconfig"
source "kernel/trace/rv/monitors/sleep/Kconfig"
+source "kernel/trace/rv/monitors/rts/Kconfig"
# Add new rtapp monitors here
# Add new monitors here
diff --git a/kernel/trace/rv/Makefile b/kernel/trace/rv/Makefile
index 750e4ad6fa0f..d7bfc7ae6677 100644
--- a/kernel/trace/rv/Makefile
+++ b/kernel/trace/rv/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_RV_MON_STS) += monitors/sts/sts.o
obj-$(CONFIG_RV_MON_NRP) += monitors/nrp/nrp.o
obj-$(CONFIG_RV_MON_SSSW) += monitors/sssw/sssw.o
obj-$(CONFIG_RV_MON_OPID) += monitors/opid/opid.o
+obj-$(CONFIG_RV_MON_RTS) += monitors/rts/rts.o
# Add new monitors here
obj-$(CONFIG_RV_REACTORS) += rv_reactors.o
obj-$(CONFIG_RV_REACT_PRINTK) += reactor_printk.o
diff --git a/kernel/trace/rv/monitors/rts/Kconfig b/kernel/trace/rv/monitors/rts/Kconfig
new file mode 100644
index 000000000000..5481b371bce1
--- /dev/null
+++ b/kernel/trace/rv/monitors/rts/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+config RV_MON_RTS
+ depends on RV
+ select RV_LTL_MONITOR
+ depends on RV_MON_RTAPP
+ default y
+ select LTL_MON_EVENTS_CPU
+ bool "rts monitor"
+ help
+ Add support for RTS (real-time scheduling) monitor which validates
+ that real-time-priority tasks are scheduled before SCHED_OTHER tasks.
+
+ This monitor may report an error if RT throttling or deadline server
+ is enabled.
+
+ Say Y if you are debugging or testing a real-time system.
diff --git a/kernel/trace/rv/monitors/rts/rts.c b/kernel/trace/rv/monitors/rts/rts.c
new file mode 100644
index 000000000000..b4c3d3a4671d
--- /dev/null
+++ b/kernel/trace/rv/monitors/rts/rts.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ftrace.h>
+#include <linux/tracepoint.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rv.h>
+#include <linux/sched/deadline.h>
+#include <linux/sched/rt.h>
+#include <rv/instrumentation.h>
+
+#define MODULE_NAME "rts"
+
+#include <trace/events/sched.h>
+#include <rv_trace.h>
+#include <monitors/rtapp/rtapp.h>
+
+#include "rts.h"
+#include <rv/ltl_monitor.h>
+
+static DEFINE_PER_CPU(unsigned int, nr_queued);
+
+static void ltl_atoms_fetch(unsigned int cpu, struct ltl_monitor *mon)
+{
+}
+
+static void ltl_atoms_init(unsigned int cpu, struct ltl_monitor *mon,
+ bool target_creation)
+{
+ ltl_atom_set(mon, LTL_SCHED_SWITCH, false);
+ ltl_atom_set(mon, LTL_SCHED_SWITCH_DL, false);
+ ltl_atom_set(mon, LTL_SCHED_SWITCH_RT, false);
+
+ /*
+ * This may not be accurate, there may be enqueued RT tasks. But that's
+ * okay, the worst we get is a false negative. It will be accurate as
+ * soon as the CPU no longer has any queued RT task.
+ */
+ ltl_atom_set(mon, LTL_RT_TASK_ENQUEUED, false);
+}
+
+static void handle_enqueue_task(void *data, int cpu, struct task_struct *task)
+{
+ unsigned int *queued = per_cpu_ptr(&nr_queued, cpu);
+
+ if (!rt_task(task))
+ return;
+
+ (*queued)++;
+ ltl_atom_update(cpu, LTL_RT_TASK_ENQUEUED, true);
+}
+
+static void handle_dequeue_task(void *data, int cpu, struct task_struct *task)
+{
+ unsigned int *queued = per_cpu_ptr(&nr_queued, cpu);
+
+ if (!rt_task(task))
+ return;
+
+ /*
+ * This may not be accurate for a short time after the monitor is
+ * enabled, because there may be enqueued RT tasks which are not counted
+ * toward nr_queued. But that's okay, the worst we get is a false
+ * negative. nr_queued will be accurate as soon as the CPU no longer has
+ * any queued RT task.
+ */
+ if (*queued)
+ (*queued)--;
+ if (!*queued)
+ ltl_atom_update(cpu, LTL_RT_TASK_ENQUEUED, false);
+}
+
+static void handle_sched_switch(void *data, bool preempt, struct task_struct *prev,
+ struct task_struct *next, unsigned int prev_state)
+{
+ unsigned int cpu = smp_processor_id();
+ struct ltl_monitor *mon = ltl_get_monitor(cpu);
+
+ ltl_atom_set(mon, LTL_SCHED_SWITCH_RT, rt_task(next));
+ ltl_atom_set(mon, LTL_SCHED_SWITCH_DL, dl_task(next));
+ ltl_atom_update(cpu, LTL_SCHED_SWITCH, true);
+
+ ltl_atom_set(mon, LTL_SCHED_SWITCH_RT, false);
+ ltl_atom_set(mon, LTL_SCHED_SWITCH_DL, false);
+ ltl_atom_update(cpu, LTL_SCHED_SWITCH, false);
+}
+
+static int enable_rts(void)
+{
+ unsigned int cpu;
+ int retval;
+
+ retval = ltl_monitor_init();
+ if (retval)
+ return retval;
+
+ for_each_possible_cpu(cpu) {
+ unsigned int *queued = per_cpu_ptr(&nr_queued, cpu);
+
+ *queued = 0;
+ }
+
+ rv_attach_trace_probe("rts", dequeue_task_tp, handle_dequeue_task);
+ rv_attach_trace_probe("rts", enqueue_task_tp, handle_enqueue_task);
+ rv_attach_trace_probe("rts", sched_switch, handle_sched_switch);
+
+ return 0;
+}
+
+static void disable_rts(void)
+{
+ rv_detach_trace_probe("rts", sched_switch, handle_sched_switch);
+ rv_detach_trace_probe("rts", enqueue_task_tp, handle_enqueue_task);
+ rv_detach_trace_probe("rts", dequeue_task_tp, handle_dequeue_task);
+
+ ltl_monitor_destroy();
+}
+
+/*
+ * This is the monitor register section.
+ */
+static struct rv_monitor rv_rts = {
+ .name = "rts",
+ .description = "Validate that real-time tasks are scheduled before lower-priority tasks",
+ .enable = enable_rts,
+ .disable = disable_rts,
+};
+
+static int __init register_rts(void)
+{
+ return rv_register_monitor(&rv_rts, &rv_rtapp);
+}
+
+static void __exit unregister_rts(void)
+{
+ rv_unregister_monitor(&rv_rts);
+}
+
+module_init(register_rts);
+module_exit(unregister_rts);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nam Cao <namcao@linutronix.de>");
+MODULE_DESCRIPTION("rts: Validate that real-time tasks are scheduled before lower-priority tasks");
diff --git a/kernel/trace/rv/monitors/rts/rts.h b/kernel/trace/rv/monitors/rts/rts.h
new file mode 100644
index 000000000000..5881f30a38ce
--- /dev/null
+++ b/kernel/trace/rv/monitors/rts/rts.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * C implementation of Buchi automaton, automatically generated by
+ * tools/verification/rvgen from the linear temporal logic specification.
+ * For further information, see kernel documentation:
+ * Documentation/trace/rv/linear_temporal_logic.rst
+ */
+
+#include <linux/rv.h>
+
+#define MONITOR_NAME rts
+
+#define LTL_MONITOR_TYPE RV_MON_PER_CPU
+
+enum ltl_atom {
+ LTL_RT_TASK_ENQUEUED,
+ LTL_SCHED_SWITCH,
+ LTL_SCHED_SWITCH_DL,
+ LTL_SCHED_SWITCH_RT,
+ LTL_NUM_ATOM
+};
+static_assert(LTL_NUM_ATOM <= RV_MAX_LTL_ATOM);
+
+static const char *ltl_atom_str(enum ltl_atom atom)
+{
+ static const char *const names[] = {
+ "rt_ta_en",
+ "sc_sw",
+ "sc_sw_dl",
+ "sc_sw_rt",
+ };
+
+ return names[atom];
+}
+
+enum ltl_buchi_state {
+ S0,
+ S1,
+ S2,
+ S3,
+ S4,
+ RV_NUM_BA_STATES
+};
+static_assert(RV_NUM_BA_STATES <= RV_MAX_BA_STATES);
+
+static void ltl_start(unsigned int cpu, struct ltl_monitor *mon)
+{
+ bool sched_switch_rt = test_bit(LTL_SCHED_SWITCH_RT, mon->atoms);
+ bool sched_switch_dl = test_bit(LTL_SCHED_SWITCH_DL, mon->atoms);
+ bool sched_switch = test_bit(LTL_SCHED_SWITCH, mon->atoms);
+ bool rt_task_enqueued = test_bit(LTL_RT_TASK_ENQUEUED, mon->atoms);
+ bool val13 = !rt_task_enqueued;
+ bool val8 = sched_switch_dl || val13;
+ bool val9 = sched_switch_rt || val8;
+ bool val6 = !sched_switch;
+ bool val1 = !rt_task_enqueued;
+
+ if (val1)
+ __set_bit(S0, mon->states);
+ if (val6)
+ __set_bit(S1, mon->states);
+ if (val9)
+ __set_bit(S4, mon->states);
+}
+
+static void
+ltl_possible_next_states(struct ltl_monitor *mon, unsigned int state, unsigned long *next)
+{
+ bool sched_switch_rt = test_bit(LTL_SCHED_SWITCH_RT, mon->atoms);
+ bool sched_switch_dl = test_bit(LTL_SCHED_SWITCH_DL, mon->atoms);
+ bool sched_switch = test_bit(LTL_SCHED_SWITCH, mon->atoms);
+ bool rt_task_enqueued = test_bit(LTL_RT_TASK_ENQUEUED, mon->atoms);
+ bool val13 = !rt_task_enqueued;
+ bool val8 = sched_switch_dl || val13;
+ bool val9 = sched_switch_rt || val8;
+ bool val6 = !sched_switch;
+ bool val1 = !rt_task_enqueued;
+
+ switch (state) {
+ case S0:
+ if (val1)
+ __set_bit(S0, next);
+ if (val6)
+ __set_bit(S1, next);
+ if (val9)
+ __set_bit(S4, next);
+ break;
+ case S1:
+ if (val6)
+ __set_bit(S1, next);
+ if (val1 && val6)
+ __set_bit(S2, next);
+ if (val1 && val9)
+ __set_bit(S3, next);
+ if (val9)
+ __set_bit(S4, next);
+ break;
+ case S2:
+ if (val6)
+ __set_bit(S1, next);
+ if (val1 && val6)
+ __set_bit(S2, next);
+ if (val1 && val9)
+ __set_bit(S3, next);
+ if (val9)
+ __set_bit(S4, next);
+ break;
+ case S3:
+ if (val1)
+ __set_bit(S0, next);
+ if (val6)
+ __set_bit(S1, next);
+ if (val9)
+ __set_bit(S4, next);
+ break;
+ case S4:
+ if (val1)
+ __set_bit(S0, next);
+ if (val6)
+ __set_bit(S1, next);
+ if (val9)
+ __set_bit(S4, next);
+ break;
+ }
+}
diff --git a/kernel/trace/rv/monitors/rts/rts_trace.h b/kernel/trace/rv/monitors/rts/rts_trace.h
new file mode 100644
index 000000000000..ac4ea84162f7
--- /dev/null
+++ b/kernel/trace/rv/monitors/rts/rts_trace.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Snippet to be included in rv_trace.h
+ */
+
+#ifdef CONFIG_RV_MON_RTS
+DEFINE_EVENT(event_ltl_monitor_cpu, event_rts,
+ TP_PROTO(unsigned int cpu, char *states, char *atoms, char *next),
+ TP_ARGS(cpu, states, atoms, next));
+
+DEFINE_EVENT(error_ltl_monitor_cpu, error_rts,
+ TP_PROTO(unsigned int cpu),
+ TP_ARGS(cpu));
+#endif /* CONFIG_RV_MON_RTS */
diff --git a/kernel/trace/rv/rv_trace.h b/kernel/trace/rv/rv_trace.h
index bf7cca6579ec..7b3a6fb8ca6f 100644
--- a/kernel/trace/rv/rv_trace.h
+++ b/kernel/trace/rv/rv_trace.h
@@ -221,6 +221,7 @@ DECLARE_EVENT_CLASS(error_ltl_monitor_cpu,
TP_printk("cpu%u: violation detected", __entry->cpu)
);
+#include <monitors/rts/rts_trace.h>
// Add new monitors based on CONFIG_LTL_MON_EVENTS_CPU here
#endif /* CONFIG_LTL_MON_EVENTS_CPU */
diff --git a/tools/verification/models/rtapp/rts.ltl b/tools/verification/models/rtapp/rts.ltl
new file mode 100644
index 000000000000..90872bca46b1
--- /dev/null
+++ b/tools/verification/models/rtapp/rts.ltl
@@ -0,0 +1,5 @@
+RULE = always (RT_TASK_ENQUEUED imply SCHEDULE_RT_NEXT)
+
+SCHEDULE_RT_NEXT = (not SCHED_SWITCH) until (SCHED_SWITCH_RT or EXCEPTIONS)
+
+EXCEPTIONS = SCHED_SWITCH_DL or not RT_TASK_ENQUEUED
--
2.39.5
* Re: [PATCH v3 4/5] sched: Add task enqueue/dequeue trace points
2025-08-11 8:40 ` [PATCH v3 4/5] sched: Add task enqueue/dequeue trace points Nam Cao
@ 2025-08-13 6:50 ` Gabriele Monaco
0 siblings, 0 replies; 7+ messages in thread
From: Gabriele Monaco @ 2025-08-13 6:50 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra, Nam Cao, Steven Rostedt,
linux-trace-kernel, linux-kernel
Cc: Juri Lelli, Vincent Guittot, Masami Hiramatsu, Mathieu Desnoyers,
Dietmar Eggemann, Ben Segall, Mel Gorman, Valentin Schneider,
K Prateek Nayak
On Mon, 2025-08-11 at 10:40 +0200, Nam Cao wrote:
> Add trace points to enqueue_task() and dequeue_task(). They are useful
> for implementing an RV monitor which validates real-time scheduling.
>
> Signed-off-by: Nam Cao <namcao@linutronix.de>
> ---
Peter, Ingo, this patch adds new tracepoints in the scheduler. Do you agree
with the change? Can we get an Ack?
Thanks,
Gabriele
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Cc: Juri Lelli <juri.lelli@redhat.com>
> Cc: Vincent Guittot <vincent.guittot@linaro.org>
> Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
> Cc: Ben Segall <bsegall@google.com>
> Cc: Mel Gorman <mgorman@suse.de>
> Cc: Valentin Schneider <vschneid@redhat.com>
> Cc: K Prateek Nayak <kprateek.nayak@amd.com>
> ---
> v3: fix up build issue on !SMP
> v2: Move the tracepoints to cover all task enqueue/dequeue, not just RT
> ---
> include/trace/events/sched.h | 13 +++++++++++++
> kernel/sched/core.c | 8 +++++++-
> 2 files changed, 20 insertions(+), 1 deletion(-)
>
> diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
> index 7b2645b50e78..696d22af5a98 100644
> --- a/include/trace/events/sched.h
> +++ b/include/trace/events/sched.h
> @@ -896,6 +896,19 @@ DECLARE_TRACE(sched_set_need_resched,
> TP_PROTO(struct task_struct *tsk, int cpu, int tif),
> TP_ARGS(tsk, cpu, tif));
>
> +/*
> + * The two trace points below may not work as expected for fair tasks due
> + * to delayed dequeue. See:
> + * https://lore.kernel.org/lkml/179674c6-f82a-4718-ace2-67b5e672fdee@amd.com/
> + */
> +DECLARE_TRACE(enqueue_task,
> + TP_PROTO(int cpu, struct task_struct *task),
> + TP_ARGS(cpu, task));
> +
> +DECLARE_TRACE(dequeue_task,
> + TP_PROTO(int cpu, struct task_struct *task),
> + TP_ARGS(cpu, task));
> +
> #endif /* _TRACE_SCHED_H */
>
> /* This part must be outside protection */
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index be00629f0ba4..6367799aa023 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -2077,6 +2077,8 @@ unsigned long get_wchan(struct task_struct *p)
>
> void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
> {
> + trace_enqueue_task_tp(cpu_of(rq), p);
> +
> if (!(flags & ENQUEUE_NOCLOCK))
> update_rq_clock(rq);
>
> @@ -2119,7 +2121,11 @@ inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
> * and mark the task ->sched_delayed.
> */
> uclamp_rq_dec(rq, p);
> - return p->sched_class->dequeue_task(rq, p, flags);
> + if (p->sched_class->dequeue_task(rq, p, flags)) {
> + trace_dequeue_task_tp(cpu_of(rq), p);
> + return true;
> + }
> + return false;
> }
>
> void activate_task(struct rq *rq, struct task_struct *p, int flags)