From: Keith Owens <kaos@sgi.com>
To: linux-ia64@vger.kernel.org
Subject: Useful patch for testing salinfo decoding
Date: Thu, 15 Dec 2005 07:24:47 +0000 [thread overview]
Message-ID: <13993.1134631487@kao2.melbourne.sgi.com> (raw)
This patch is useful for testing salinfo decoding. It lets you inject
test records into the kernel. Sample use:
cat 2005-09-28-18_53_09-cpu0-cpe.0 > /proc/sal/cpe/inject
No warranty is expressed or implied ;)
Index: linux/arch/ia64/kernel/salinfo.c
===================================================================
--- linux.orig/arch/ia64/kernel/salinfo.c 2005-12-01 13:19:38.859297686 +1100
+++ linux/arch/ia64/kernel/salinfo.c 2005-12-02 13:34:35.773150800 +1100
@@ -76,7 +76,7 @@ static char *salinfo_log_name[] = {
static struct proc_dir_entry *salinfo_proc_entries[
ARRAY_SIZE(salinfo_entries) + /* /proc/sal/bus_lock */
ARRAY_SIZE(salinfo_log_name) + /* /proc/sal/{mca,...} */
- (2 * ARRAY_SIZE(salinfo_log_name)) + /* /proc/sal/mca/{event,data} */
+ (3 * ARRAY_SIZE(salinfo_log_name)) + /* /proc/sal/mca/{event,data,inject} */
1]; /* /proc/sal */
/* Some records we get ourselves, some are accessed as saved data in buffers
@@ -144,6 +144,7 @@ struct salinfo_data {
u8 padding;
int cpu_check; /* next CPU to check */
struct salinfo_data_saved data_saved[5];/* save last 5 records from mca.c, must be < 255 */
+ struct salinfo_data_saved inject; /* data about injected record for testing */
};
static struct salinfo_data salinfo_data[ARRAY_SIZE(salinfo_log_name)];
@@ -241,6 +242,92 @@ salinfo_log_wakeup(int type, u8 *buffer,
}
}
+/* The /proc/sal/{mca,..}/inject files are write only. They allow user space
+ * to inject records of each type and have them processed as if the record was
+ * generated by SAL. A record to be injected is written in raw format to the
+ * relevant inject file and stored via the salinfo_data.inject field. When the
+ * inject file is closed, the record is processed as if it came from SAL,
+ * including driving the event and data files.
+ *
+ * Because this is a testing only patch, no verification is done on the record
+ * format, GIGO applies. Multiple programs doing injection at the same time
+ * will create corrupted records and probably oops the kernel. The records
+ * appear to come from various cpus, depending on which cpu the injection code
+ * runs on. Seeking on the inject file is not supported, this code only
+ * handles sequential write.
+ */
+
+static DECLARE_MUTEX(inject_sem);
+
+static int
+salinfo_inject_open(struct inode *inode, struct file *file)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return 0;
+}
+
+static int
+salinfo_inject_release(struct inode *inode, struct file *file)
+{
+ struct proc_dir_entry *entry = PDE(inode);
+ struct salinfo_data *data = entry->data;
+ int irq_safe = data->type != SAL_INFO_TYPE_MCA;
+ down(&inject_sem);
+ if (!data->inject.size) {
+ vfree(data->inject.buffer);
+ data->inject.id = 0;
+ } else {
+ preempt_disable();
+ salinfo_log_wakeup(data->type, data->inject.buffer, data->inject.size, irq_safe);
+ preempt_enable();
+ }
+ up(&inject_sem);
+ return 0;
+}
+
+static ssize_t
+salinfo_inject_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct proc_dir_entry *entry = PDE(inode);
+ struct salinfo_data *data = entry->data;
+ int ret = 0;
+ down(&inject_sem);
+ /* Hack: reuse data->inject.id to hold the physical size of the buffer */
+ if (*ppos + count > data->inject.id) {
+ u64 new_size = *ppos + count;
+ u8* new_buffer;
+ new_buffer = vmalloc(new_size);
+ if (!new_buffer) {
+ ret = -ENOMEM;
+ } else {
+ memcpy(new_buffer, data->inject.buffer, data->inject.size);
+ vfree(data->inject.buffer);
+ data->inject.buffer = new_buffer;
+ data->inject.id = new_size;
+ }
+ }
+ if (!ret)
+ if ((ret = copy_from_user(data->inject.buffer + *ppos, buffer, count)))
+ ret = -EFAULT;
+
+ up(&inject_sem);
+ if (ret) {
+ return ret;
+ } else {
+ *ppos += count;
+ data->inject.size = *ppos;
+ return count;
+ }
+}
+
+static struct file_operations salinfo_inject_fops = {
+ .open = salinfo_inject_open,
+ .release = salinfo_inject_release,
+ .write = salinfo_inject_write,
+};
+
/* Check for outstanding MCA/INIT records every minute (arbitrary) */
#define SALINFO_TIMER_DELAY (60*HZ)
static struct timer_list salinfo_timer;
@@ -410,6 +497,7 @@ salinfo_log_new_read(int cpu, struct sal
int saved_size = ARRAY_SIZE(data->data_saved);
data->saved_num = 0;
+ down(&inject_sem);
spin_lock_irqsave(&data_saved_lock, flags);
retry:
for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) {
@@ -428,6 +516,7 @@ retry:
}
}
spin_unlock_irqrestore(&data_saved_lock, flags);
+ up(&inject_sem);
if (!data->saved_num)
call_on_cpu(cpu, salinfo_log_read_cpu, data);
@@ -590,6 +679,13 @@ salinfo_init(void)
entry->proc_fops = &salinfo_data_fops;
*sdir++ = entry;
+ entry = create_proc_entry("inject", S_IWUSR, dir);
+ if (!entry)
+ continue;
+ entry->data = data;
+ entry->proc_fops = &salinfo_inject_fops;
+ *sdir++ = entry;
+
/* we missed any events before now */
online = 0;
for_each_online_cpu(j) {
reply other threads:[~2005-12-15 7:24 UTC|newest]
Thread overview: [no followups] expand[flat|nested] mbox.gz Atom feed
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=13993.1134631487@kao2.melbourne.sgi.com \
--to=kaos@sgi.com \
--cc=linux-ia64@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox